diff --git a/charts/cluster-autoscaler/Chart.yaml b/charts/cluster-autoscaler/Chart.yaml index 46681589a527..c871126adba8 100644 --- a/charts/cluster-autoscaler/Chart.yaml +++ b/charts/cluster-autoscaler/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.27.1 +appVersion: 1.27.2 description: Scales Kubernetes worker nodes within autoscaling groups. engine: gotpl home: https://github.com/kubernetes/autoscaler @@ -11,4 +11,4 @@ name: cluster-autoscaler sources: - https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler type: application -version: 9.29.0 +version: 9.29.1 diff --git a/charts/cluster-autoscaler/README.md b/charts/cluster-autoscaler/README.md index 2683e8543b61..ff3aba2e4fff 100644 --- a/charts/cluster-autoscaler/README.md +++ b/charts/cluster-autoscaler/README.md @@ -320,7 +320,10 @@ Though enough for the majority of installations, the default PodSecurityPolicy _ ### VerticalPodAutoscaler -The chart can install a [`VerticalPodAutoscaler`](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/README.md) for the Deployment if needed. A VPA can help minimize wasted resources when usage spikes periodically or remediate containers that are being OOMKilled. +From chart version `9.27.0` onwards, the chart can install a [`VerticalPodAutoscaler`](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/README.md) object +for the Cluster Autoscaler Deployment, scaling the CA's resources as appropriate. Note that the VPA itself +must be installed in the cluster separately. A VPA can help minimize wasted resources +when usage spikes periodically or remediate containers that are being OOMKilled. The following example snippet can be used to install VPA that allows scaling down from the default recommendations of the deployment template: @@ -383,7 +386,7 @@ vpa: | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.pullSecrets | list | `[]` | Image pull secrets | | image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | Image repository | -| image.tag | string | `"v1.27.1"` | Image tag | +| image.tag | string | `"v1.27.2"` | Image tag | | kubeTargetVersionOverride | string | `""` | Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. | | magnumCABundlePath | string | `"/etc/kubernetes/ca-bundle.crt"` | Path to the host's CA bundle, from `ca-file` in the cloud-config file. | | magnumClusterName | string | `""` | Cluster name or ID in Magnum. Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. |
diff --git a/charts/cluster-autoscaler/README.md.gotmpl b/charts/cluster-autoscaler/README.md.gotmpl index b3a042b0de82..611ad6bb121f 100644 --- a/charts/cluster-autoscaler/README.md.gotmpl +++ b/charts/cluster-autoscaler/README.md.gotmpl @@ -320,7 +320,10 @@ Though enough for the majority of installations, the default PodSecurityPolicy _ ### VerticalPodAutoscaler -The chart can install a [`VerticalPodAutoscaler`](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/README.md) for the Deployment if needed. A VPA can help minimize wasted resources when usage spikes periodically or remediate containers that are being OOMKilled. +From chart version `9.27.0` onwards, the chart can install a [`VerticalPodAutoscaler`](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/README.md) object +for the Cluster Autoscaler Deployment, scaling the CA's resources as appropriate. Note that the VPA itself +must be installed in the cluster separately. A VPA can help minimize wasted resources +when usage spikes periodically or remediate containers that are being OOMKilled. The following example snippet can be used to install VPA that allows scaling down from the default recommendations of the deployment template: diff --git a/charts/cluster-autoscaler/values.yaml b/charts/cluster-autoscaler/values.yaml index 048de6dc46a6..f414ae65e7c6 100644 --- a/charts/cluster-autoscaler/values.yaml +++ b/charts/cluster-autoscaler/values.yaml @@ -230,7 +230,7 @@ image: # image.repository -- Image repository repository: registry.k8s.io/autoscaling/cluster-autoscaler # image.tag -- Image tag - tag: v1.27.1 + tag: v1.27.2 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. diff --git a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go index 1b1e64925c5f..b1f33442f71b 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_sdk_provider.go @@ -27,7 +27,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/ec2metadata" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/endpoints" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/request" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/aws/session" + "k8s.io/autoscaler/cluster-autoscaler/version" provider_aws "k8s.io/cloud-provider-aws/pkg/providers/v1" "k8s.io/klog/v2" ) @@ -61,11 +63,14 @@ func createAWSSDKProvider(configReader io.Reader) (*awsSDKProvider, error) { } sess, err := session.NewSession(config) - if err != nil { return nil, err } + // add cluster-autoscaler to the user-agent to make it easier to identify + agent := fmt.Sprintf("cluster-autoscaler/v%s", version.ClusterAutoscalerVersion) + sess.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(agent)) + provider := &awsSDKProvider{ session: sess, }
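For context on the handler mechanism used in `createAWSSDKProvider` above, here is a minimal, self-contained sketch of the same User-Agent technique, written against the upstream `github.com/aws/aws-sdk-go` import paths rather than the copy vendored under `k8s.io/autoscaler/...`; the region and version string are placeholder values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("eu-west-1"))
	if err != nil {
		log.Fatal(err)
	}

	// Append a free-form token to the User-Agent header of every request
	// built from this session; clients created later inherit the handler.
	agent := "cluster-autoscaler/v1.27.2" // stand-in for version.ClusterAutoscalerVersion
	sess.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(agent))

	// Build (but do not send) a request to inspect the resulting header.
	req, _ := ec2.New(sess).DescribeInstancesRequest(nil)
	if err := req.Build(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.HTTPRequest.Header.Get("User-Agent"))
}
```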
diff --git a/cluster-autoscaler/cloudprovider/brightbox/Makefile b/cluster-autoscaler/cloudprovider/brightbox/Makefile index 14c20c9dd550..d7391aad7e5d 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/Makefile +++ b/cluster-autoscaler/cloudprovider/brightbox/Makefile @@ -1,5 +1,5 @@ export BUILD_TAGS=brightbox -export REGISTRY=brightbox +export REGISTRY?=brightbox export GOARCH?=$(shell go env GOARCH) ifndef TAG override TAG=dev diff --git a/cluster-autoscaler/cloudprovider/brightbox/README.md b/cluster-autoscaler/cloudprovider/brightbox/README.md index 270fd1a0e5d4..2823105845bf 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/README.md +++ b/cluster-autoscaler/cloudprovider/brightbox/README.md @@ -161,8 +161,8 @@ $ make build This builds an autoscaler containing only the Brightbox Cloud provider, tagged as `brightbox/cluster-autoscaler-brightbox:dev`. To build any -other version add a TAG variable +other version, add a TAG variable and/or a REGISTRY variable ``` -make build TAG=1.1x +make build TAG=1.1x REGISTRY=cr.brightbox.com/acc-xxxxx/ ``` diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go index 6ce89bbbf98f..75d03de79caf 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go +++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go @@ -359,24 +359,32 @@ func makeNodeGroupFromAPIDetails( if mapData["server_group"] == "" { return nil, cloudprovider.ErrIllegalConfiguration } + ng := brightboxNodeGroup{ + id: mapData["server_group"], + minSize: minSize, + maxSize: maxSize, + Cloud: cloudclient, + } + imageID := mapData["image"] + if !(len(imageID) == 9 && strings.HasPrefix(imageID, "img-")) { + image, err := ng.GetImageByName(imageID) + if err != nil || image == nil { + return nil, cloudprovider.ErrIllegalConfiguration + } + imageID = image.Id + } userData := mapData["user_data"] options := &brightbox.ServerOptions{ - Image: mapData["image"], + Image: imageID, Name: &name, ServerType: mapData["type"], Zone: mapData["zone"], UserData: &userData, ServerGroups: mergeServerGroups(mapData), } - result := brightboxNodeGroup{ - id: mapData["server_group"], - minSize: minSize, - maxSize: maxSize, - serverOptions: options, - Cloud: cloudclient, - } - klog.V(4).Info(result.Debug()) - return &result, nil + ng.serverOptions = options + klog.V(4).Info(ng.Debug()) + return &ng, nil } func mergeServerGroups(data map[string]string) []string {
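The `makeNodeGroupFromAPIDetails` change above accepts either a literal image ID in the `image` field or, failing the nine-character `img-` check, treats the value as a regular expression to be resolved against the newest matching official image. A self-contained sketch of that resolution logic, using a toy `Image` type and hypothetical sample data in place of the gobrightbox client:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

// Image is a stripped-down stand-in for gobrightbox.Image.
type Image struct {
	Id        string
	Name      string
	Official  bool
	CreatedAt time.Time
}

// looksLikeImageID mirrors the check in makeNodeGroupFromAPIDetails:
// Brightbox image IDs are exactly nine characters and start with "img-".
func looksLikeImageID(s string) bool {
	return len(s) == 9 && strings.HasPrefix(s, "img-")
}

// resolveImage returns the value unchanged when it is already an ID,
// otherwise it treats it as a regexp and picks the newest official match.
func resolveImage(value string, images []Image) (string, error) {
	if looksLikeImageID(value) {
		return value, nil
	}
	re, err := regexp.Compile(value)
	if err != nil {
		return "", err
	}
	var newest *Image
	for i := range images {
		img := &images[i]
		if !img.Official || !re.MatchString(img.Name) {
			continue
		}
		if newest == nil || img.CreatedAt.After(newest.CreatedAt) {
			newest = img
		}
	}
	if newest == nil {
		return "", fmt.Errorf("no image matches %q", value)
	}
	return newest.Id, nil
}

func main() {
	images := []Image{ // hypothetical image catalogue
		{Id: "img-aaaaa", Name: "ubuntu-22.04-amd64", Official: true, CreatedAt: time.Now().Add(-48 * time.Hour)},
		{Id: "img-bbbbb", Name: "ubuntu-22.04-amd64", Official: true, CreatedAt: time.Now()},
	}
	id, err := resolveImage("^ubuntu-22.04", images)
	fmt.Println(id, err) // img-bbbbb <nil>
}
```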
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go index 5fffa2025143..131e1c5a541b 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go +++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go @@ -18,6 +18,8 @@ import ( "context" "fmt" "net/http" + "regexp" + "sort" "strings" brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox" @@ -166,6 +168,36 @@ func (c *Cloud) GetServerType(identifier string) (*brightbox.ServerType, error) return client.ServerType(identifier) } +// GetImageByName obtains the most recent available image that matches the supplied pattern +func (c *Cloud) GetImageByName(name string) (*brightbox.Image, error) { + klog.V(4).Infof("GetImageByName %q", name) + client, err := c.CloudClient() + if err != nil { + return nil, err + } + klog.V(6).Info("GetImageByName compiling regexp") + nameRe, err := regexp.Compile(name) + if err != nil { + return nil, err + } + klog.V(6).Info("GetImageByName retrieving images") + images, err := client.Images() + if err != nil { + return nil, err + } + klog.V(6).Info("GetImageByName filtering images") + filteredImages := filter( + images, + func(i brightbox.Image) bool { + return i.Official && + i.Status == status.Available && + nameRe.MatchString(i.Name) + }, + ) + klog.V(6).Infof("GetImageByName images selected (%+v)", filteredImages) + return mostRecent(filteredImages), nil +} + // GetConfigMaps obtains the list of Config Maps on the account func (c *Cloud) GetConfigMaps() ([]brightbox.ConfigMap, error) { klog.V(4).Info("GetConfigMaps") @@ -555,3 +587,27 @@ func ErrorIfAcmeNotComplete(acme *brightbox.LoadBalancerAcme) error { } return nil } + +// mostRecent returns the most recent image out of a slice of images, +// or nil if there are none +func mostRecent(items []brightbox.Image) *brightbox.Image { + if len(items) == 0 { + return nil + } + sortedItems := items + sort.Slice(sortedItems, func(i, j int) bool { + return sortedItems[i].CreatedAt.Unix() > sortedItems[j].CreatedAt.Unix() + }) + return &sortedItems[0] +} + +// filter returns a new slice containing all elements of the +// input slice for which the provided predicate function returns true. +func filter[T any](input []T, pred func(T) bool) (output []T) { + for _, v := range input { + if pred(v) { + output = append(output, v) + } + } + return output +} diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go index d1ad5d55056b..f544edf39ac6 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go +++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go @@ -92,6 +92,9 @@ type CloudAccess interface { // DestroyCloudIP issues a request to destroy the cloud ip DestroyCloudIP(identifier string) error + // Images retrieves a list of all images + Images() ([]brightbox.Image, error) + // ConfigMaps retrieves a list of all config maps ConfigMaps() ([]brightbox.ConfigMap, error) diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go index 04e09aeb4f35..271f2e728f5a 100644 --- a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go +++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go @@ -12,10 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Code generated by mockery v2.20.0. DO NOT EDIT. + package mocks import ( - brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox" + gobrightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox" mock "github.com/stretchr/testify/mock" ) @@ -26,19 +28,22 @@ type CloudAccess struct { } // AddServersToServerGroup provides a mock function with given fields: identifier, serverIds -func (_m *CloudAccess) AddServersToServerGroup(identifier string, serverIds []string) (*brightbox.ServerGroup, error) { +func (_m *CloudAccess) AddServersToServerGroup(identifier string, serverIds []string) (*gobrightbox.ServerGroup, error) { ret := _m.Called(identifier, serverIds) - var r0 *brightbox.ServerGroup - if rf, ok := ret.Get(0).(func(string, []string) *brightbox.ServerGroup); ok { + var r0 *gobrightbox.ServerGroup + var r1 error + if rf, ok := ret.Get(0).(func(string, []string) (*gobrightbox.ServerGroup, error)); ok { + return rf(identifier, serverIds) + } + if rf, ok := ret.Get(0).(func(string, []string) *gobrightbox.ServerGroup); ok { r0 = rf(identifier, serverIds) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.ServerGroup) + r0 = ret.Get(0).(*gobrightbox.ServerGroup) } } - var r1 error if rf, ok := ret.Get(1).(func(string, []string) error); ok { r1 = rf(identifier, serverIds) } else { @@ -49,19 +54,22 @@ func (_m *CloudAccess) AddServersToServerGroup(identifier string, serverIds []st } // CloudIP provides a mock function with given fields: identifier -func (_m *CloudAccess) CloudIP(identifier string) (*brightbox.CloudIP, error) { +func (_m *CloudAccess) CloudIP(identifier string) (*gobrightbox.CloudIP, error) { ret := _m.Called(identifier) - var r0 *brightbox.CloudIP - if rf, ok := ret.Get(0).(func(string) *brightbox.CloudIP); ok { + var r0
*gobrightbox.CloudIP + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gobrightbox.CloudIP, error)); ok { + return rf(identifier) + } + if rf, ok := ret.Get(0).(func(string) *gobrightbox.CloudIP); ok { r0 = rf(identifier) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.CloudIP) + r0 = ret.Get(0).(*gobrightbox.CloudIP) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(identifier) } else { @@ -72,19 +80,22 @@ func (_m *CloudAccess) CloudIP(identifier string) (*brightbox.CloudIP, error) { } // CloudIPs provides a mock function with given fields: -func (_m *CloudAccess) CloudIPs() ([]brightbox.CloudIP, error) { +func (_m *CloudAccess) CloudIPs() ([]gobrightbox.CloudIP, error) { ret := _m.Called() - var r0 []brightbox.CloudIP - if rf, ok := ret.Get(0).(func() []brightbox.CloudIP); ok { + var r0 []gobrightbox.CloudIP + var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.CloudIP, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.CloudIP); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]brightbox.CloudIP) + r0 = ret.Get(0).([]gobrightbox.CloudIP) } } - var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -95,19 +106,22 @@ func (_m *CloudAccess) CloudIPs() ([]brightbox.CloudIP, error) { } // ConfigMap provides a mock function with given fields: identifier -func (_m *CloudAccess) ConfigMap(identifier string) (*brightbox.ConfigMap, error) { +func (_m *CloudAccess) ConfigMap(identifier string) (*gobrightbox.ConfigMap, error) { ret := _m.Called(identifier) - var r0 *brightbox.ConfigMap - if rf, ok := ret.Get(0).(func(string) *brightbox.ConfigMap); ok { + var r0 *gobrightbox.ConfigMap + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gobrightbox.ConfigMap, error)); ok { + return rf(identifier) + } + if rf, ok := ret.Get(0).(func(string) *gobrightbox.ConfigMap); ok { r0 = rf(identifier) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.ConfigMap) + r0 = ret.Get(0).(*gobrightbox.ConfigMap) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(identifier) } else { @@ -118,19 +132,22 @@ func (_m *CloudAccess) ConfigMap(identifier string) (*brightbox.ConfigMap, error } // ConfigMaps provides a mock function with given fields: -func (_m *CloudAccess) ConfigMaps() ([]brightbox.ConfigMap, error) { +func (_m *CloudAccess) ConfigMaps() ([]gobrightbox.ConfigMap, error) { ret := _m.Called() - var r0 []brightbox.ConfigMap - if rf, ok := ret.Get(0).(func() []brightbox.ConfigMap); ok { + var r0 []gobrightbox.ConfigMap + var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.ConfigMap, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.ConfigMap); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]brightbox.ConfigMap) + r0 = ret.Get(0).([]gobrightbox.ConfigMap) } } - var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -141,20 +158,23 @@ func (_m *CloudAccess) ConfigMaps() ([]brightbox.ConfigMap, error) { } // CreateCloudIP provides a mock function with given fields: newCloudIP -func (_m *CloudAccess) CreateCloudIP(newCloudIP *brightbox.CloudIPOptions) (*brightbox.CloudIP, error) { +func (_m *CloudAccess) CreateCloudIP(newCloudIP *gobrightbox.CloudIPOptions) (*gobrightbox.CloudIP, error) { ret := _m.Called(newCloudIP) - var r0 *brightbox.CloudIP - if rf, ok := ret.Get(0).(func(*brightbox.CloudIPOptions) *brightbox.CloudIP); ok { + var r0 *gobrightbox.CloudIP + var 
r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.CloudIPOptions) (*gobrightbox.CloudIP, error)); ok { + return rf(newCloudIP) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.CloudIPOptions) *gobrightbox.CloudIP); ok { r0 = rf(newCloudIP) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.CloudIP) + r0 = ret.Get(0).(*gobrightbox.CloudIP) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.CloudIPOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.CloudIPOptions) error); ok { r1 = rf(newCloudIP) } else { r1 = ret.Error(1) @@ -164,20 +184,23 @@ func (_m *CloudAccess) CreateCloudIP(newCloudIP *brightbox.CloudIPOptions) (*bri } // CreateFirewallPolicy provides a mock function with given fields: policyOptions -func (_m *CloudAccess) CreateFirewallPolicy(policyOptions *brightbox.FirewallPolicyOptions) (*brightbox.FirewallPolicy, error) { +func (_m *CloudAccess) CreateFirewallPolicy(policyOptions *gobrightbox.FirewallPolicyOptions) (*gobrightbox.FirewallPolicy, error) { ret := _m.Called(policyOptions) - var r0 *brightbox.FirewallPolicy - if rf, ok := ret.Get(0).(func(*brightbox.FirewallPolicyOptions) *brightbox.FirewallPolicy); ok { + var r0 *gobrightbox.FirewallPolicy + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.FirewallPolicyOptions) (*gobrightbox.FirewallPolicy, error)); ok { + return rf(policyOptions) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.FirewallPolicyOptions) *gobrightbox.FirewallPolicy); ok { r0 = rf(policyOptions) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.FirewallPolicy) + r0 = ret.Get(0).(*gobrightbox.FirewallPolicy) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.FirewallPolicyOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.FirewallPolicyOptions) error); ok { r1 = rf(policyOptions) } else { r1 = ret.Error(1) @@ -187,20 +210,23 @@ func (_m *CloudAccess) CreateFirewallPolicy(policyOptions *brightbox.FirewallPol } // CreateFirewallRule provides a mock function with given fields: ruleOptions -func (_m *CloudAccess) CreateFirewallRule(ruleOptions *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error) { +func (_m *CloudAccess) CreateFirewallRule(ruleOptions *gobrightbox.FirewallRuleOptions) (*gobrightbox.FirewallRule, error) { ret := _m.Called(ruleOptions) - var r0 *brightbox.FirewallRule - if rf, ok := ret.Get(0).(func(*brightbox.FirewallRuleOptions) *brightbox.FirewallRule); ok { + var r0 *gobrightbox.FirewallRule + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.FirewallRuleOptions) (*gobrightbox.FirewallRule, error)); ok { + return rf(ruleOptions) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.FirewallRuleOptions) *gobrightbox.FirewallRule); ok { r0 = rf(ruleOptions) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.FirewallRule) + r0 = ret.Get(0).(*gobrightbox.FirewallRule) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.FirewallRuleOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.FirewallRuleOptions) error); ok { r1 = rf(ruleOptions) } else { r1 = ret.Error(1) @@ -210,20 +236,23 @@ func (_m *CloudAccess) CreateFirewallRule(ruleOptions *brightbox.FirewallRuleOpt } // CreateLoadBalancer provides a mock function with given fields: newDetails -func (_m *CloudAccess) CreateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error) { +func (_m *CloudAccess) CreateLoadBalancer(newDetails *gobrightbox.LoadBalancerOptions) (*gobrightbox.LoadBalancer, error) { ret := 
_m.Called(newDetails) - var r0 *brightbox.LoadBalancer - if rf, ok := ret.Get(0).(func(*brightbox.LoadBalancerOptions) *brightbox.LoadBalancer); ok { + var r0 *gobrightbox.LoadBalancer + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.LoadBalancerOptions) (*gobrightbox.LoadBalancer, error)); ok { + return rf(newDetails) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.LoadBalancerOptions) *gobrightbox.LoadBalancer); ok { r0 = rf(newDetails) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.LoadBalancer) + r0 = ret.Get(0).(*gobrightbox.LoadBalancer) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.LoadBalancerOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.LoadBalancerOptions) error); ok { r1 = rf(newDetails) } else { r1 = ret.Error(1) @@ -233,20 +262,23 @@ func (_m *CloudAccess) CreateLoadBalancer(newDetails *brightbox.LoadBalancerOpti } // CreateServer provides a mock function with given fields: newServer -func (_m *CloudAccess) CreateServer(newServer *brightbox.ServerOptions) (*brightbox.Server, error) { +func (_m *CloudAccess) CreateServer(newServer *gobrightbox.ServerOptions) (*gobrightbox.Server, error) { ret := _m.Called(newServer) - var r0 *brightbox.Server - if rf, ok := ret.Get(0).(func(*brightbox.ServerOptions) *brightbox.Server); ok { + var r0 *gobrightbox.Server + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.ServerOptions) (*gobrightbox.Server, error)); ok { + return rf(newServer) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.ServerOptions) *gobrightbox.Server); ok { r0 = rf(newServer) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.Server) + r0 = ret.Get(0).(*gobrightbox.Server) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.ServerOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.ServerOptions) error); ok { r1 = rf(newServer) } else { r1 = ret.Error(1) @@ -256,20 +288,23 @@ func (_m *CloudAccess) CreateServer(newServer *brightbox.ServerOptions) (*bright } // CreateServerGroup provides a mock function with given fields: newServerGroup -func (_m *CloudAccess) CreateServerGroup(newServerGroup *brightbox.ServerGroupOptions) (*brightbox.ServerGroup, error) { +func (_m *CloudAccess) CreateServerGroup(newServerGroup *gobrightbox.ServerGroupOptions) (*gobrightbox.ServerGroup, error) { ret := _m.Called(newServerGroup) - var r0 *brightbox.ServerGroup - if rf, ok := ret.Get(0).(func(*brightbox.ServerGroupOptions) *brightbox.ServerGroup); ok { + var r0 *gobrightbox.ServerGroup + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.ServerGroupOptions) (*gobrightbox.ServerGroup, error)); ok { + return rf(newServerGroup) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.ServerGroupOptions) *gobrightbox.ServerGroup); ok { r0 = rf(newServerGroup) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.ServerGroup) + r0 = ret.Get(0).(*gobrightbox.ServerGroup) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.ServerGroupOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.ServerGroupOptions) error); ok { r1 = rf(newServerGroup) } else { r1 = ret.Error(1) @@ -349,19 +384,48 @@ func (_m *CloudAccess) DestroyServerGroup(identifier string) error { } // FirewallPolicies provides a mock function with given fields: -func (_m *CloudAccess) FirewallPolicies() ([]brightbox.FirewallPolicy, error) { +func (_m *CloudAccess) FirewallPolicies() ([]gobrightbox.FirewallPolicy, error) { ret := _m.Called() - var r0 []brightbox.FirewallPolicy - if rf, ok := 
ret.Get(0).(func() []brightbox.FirewallPolicy); ok { + var r0 []gobrightbox.FirewallPolicy + var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.FirewallPolicy, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.FirewallPolicy); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]brightbox.FirewallPolicy) + r0 = ret.Get(0).([]gobrightbox.FirewallPolicy) } } + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Images provides a mock function with given fields: +func (_m *CloudAccess) Images() ([]gobrightbox.Image, error) { + ret := _m.Called() + + var r0 []gobrightbox.Image var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.Image, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.Image); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]gobrightbox.Image) + } + } + if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -372,19 +436,22 @@ func (_m *CloudAccess) FirewallPolicies() ([]brightbox.FirewallPolicy, error) { } // LoadBalancer provides a mock function with given fields: identifier -func (_m *CloudAccess) LoadBalancer(identifier string) (*brightbox.LoadBalancer, error) { +func (_m *CloudAccess) LoadBalancer(identifier string) (*gobrightbox.LoadBalancer, error) { ret := _m.Called(identifier) - var r0 *brightbox.LoadBalancer - if rf, ok := ret.Get(0).(func(string) *brightbox.LoadBalancer); ok { + var r0 *gobrightbox.LoadBalancer + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gobrightbox.LoadBalancer, error)); ok { + return rf(identifier) + } + if rf, ok := ret.Get(0).(func(string) *gobrightbox.LoadBalancer); ok { r0 = rf(identifier) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.LoadBalancer) + r0 = ret.Get(0).(*gobrightbox.LoadBalancer) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(identifier) } else { @@ -395,19 +462,22 @@ func (_m *CloudAccess) LoadBalancer(identifier string) (*brightbox.LoadBalancer, } // LoadBalancers provides a mock function with given fields: -func (_m *CloudAccess) LoadBalancers() ([]brightbox.LoadBalancer, error) { +func (_m *CloudAccess) LoadBalancers() ([]gobrightbox.LoadBalancer, error) { ret := _m.Called() - var r0 []brightbox.LoadBalancer - if rf, ok := ret.Get(0).(func() []brightbox.LoadBalancer); ok { + var r0 []gobrightbox.LoadBalancer + var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.LoadBalancer, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.LoadBalancer); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]brightbox.LoadBalancer) + r0 = ret.Get(0).([]gobrightbox.LoadBalancer) } } - var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -432,19 +502,22 @@ func (_m *CloudAccess) MapCloudIP(identifier string, destination string) error { } // RemoveServersFromServerGroup provides a mock function with given fields: identifier, serverIds -func (_m *CloudAccess) RemoveServersFromServerGroup(identifier string, serverIds []string) (*brightbox.ServerGroup, error) { +func (_m *CloudAccess) RemoveServersFromServerGroup(identifier string, serverIds []string) (*gobrightbox.ServerGroup, error) { ret := _m.Called(identifier, serverIds) - var r0 *brightbox.ServerGroup - if rf, ok := ret.Get(0).(func(string, []string) *brightbox.ServerGroup); ok { + var r0 *gobrightbox.ServerGroup + var r1 error + if rf, ok := 
ret.Get(0).(func(string, []string) (*gobrightbox.ServerGroup, error)); ok { + return rf(identifier, serverIds) + } + if rf, ok := ret.Get(0).(func(string, []string) *gobrightbox.ServerGroup); ok { r0 = rf(identifier, serverIds) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.ServerGroup) + r0 = ret.Get(0).(*gobrightbox.ServerGroup) } } - var r1 error if rf, ok := ret.Get(1).(func(string, []string) error); ok { r1 = rf(identifier, serverIds) } else { @@ -455,19 +528,22 @@ func (_m *CloudAccess) RemoveServersFromServerGroup(identifier string, serverIds } // Server provides a mock function with given fields: identifier -func (_m *CloudAccess) Server(identifier string) (*brightbox.Server, error) { +func (_m *CloudAccess) Server(identifier string) (*gobrightbox.Server, error) { ret := _m.Called(identifier) - var r0 *brightbox.Server - if rf, ok := ret.Get(0).(func(string) *brightbox.Server); ok { + var r0 *gobrightbox.Server + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gobrightbox.Server, error)); ok { + return rf(identifier) + } + if rf, ok := ret.Get(0).(func(string) *gobrightbox.Server); ok { r0 = rf(identifier) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.Server) + r0 = ret.Get(0).(*gobrightbox.Server) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(identifier) } else { @@ -478,19 +554,22 @@ func (_m *CloudAccess) Server(identifier string) (*brightbox.Server, error) { } // ServerGroup provides a mock function with given fields: identifier -func (_m *CloudAccess) ServerGroup(identifier string) (*brightbox.ServerGroup, error) { +func (_m *CloudAccess) ServerGroup(identifier string) (*gobrightbox.ServerGroup, error) { ret := _m.Called(identifier) - var r0 *brightbox.ServerGroup - if rf, ok := ret.Get(0).(func(string) *brightbox.ServerGroup); ok { + var r0 *gobrightbox.ServerGroup + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gobrightbox.ServerGroup, error)); ok { + return rf(identifier) + } + if rf, ok := ret.Get(0).(func(string) *gobrightbox.ServerGroup); ok { r0 = rf(identifier) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.ServerGroup) + r0 = ret.Get(0).(*gobrightbox.ServerGroup) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(identifier) } else { @@ -501,19 +580,22 @@ func (_m *CloudAccess) ServerGroup(identifier string) (*brightbox.ServerGroup, e } // ServerGroups provides a mock function with given fields: -func (_m *CloudAccess) ServerGroups() ([]brightbox.ServerGroup, error) { +func (_m *CloudAccess) ServerGroups() ([]gobrightbox.ServerGroup, error) { ret := _m.Called() - var r0 []brightbox.ServerGroup - if rf, ok := ret.Get(0).(func() []brightbox.ServerGroup); ok { + var r0 []gobrightbox.ServerGroup + var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.ServerGroup, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.ServerGroup); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]brightbox.ServerGroup) + r0 = ret.Get(0).([]gobrightbox.ServerGroup) } } - var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -524,19 +606,22 @@ func (_m *CloudAccess) ServerGroups() ([]brightbox.ServerGroup, error) { } // ServerType provides a mock function with given fields: identifier -func (_m *CloudAccess) ServerType(identifier string) (*brightbox.ServerType, error) { +func (_m *CloudAccess) ServerType(identifier string) (*gobrightbox.ServerType, error) { ret := _m.Called(identifier) - var r0 
*brightbox.ServerType - if rf, ok := ret.Get(0).(func(string) *brightbox.ServerType); ok { + var r0 *gobrightbox.ServerType + var r1 error + if rf, ok := ret.Get(0).(func(string) (*gobrightbox.ServerType, error)); ok { + return rf(identifier) + } + if rf, ok := ret.Get(0).(func(string) *gobrightbox.ServerType); ok { r0 = rf(identifier) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.ServerType) + r0 = ret.Get(0).(*gobrightbox.ServerType) } } - var r1 error if rf, ok := ret.Get(1).(func(string) error); ok { r1 = rf(identifier) } else { @@ -547,19 +632,22 @@ func (_m *CloudAccess) ServerType(identifier string) (*brightbox.ServerType, err } // ServerTypes provides a mock function with given fields: -func (_m *CloudAccess) ServerTypes() ([]brightbox.ServerType, error) { +func (_m *CloudAccess) ServerTypes() ([]gobrightbox.ServerType, error) { ret := _m.Called() - var r0 []brightbox.ServerType - if rf, ok := ret.Get(0).(func() []brightbox.ServerType); ok { + var r0 []gobrightbox.ServerType + var r1 error + if rf, ok := ret.Get(0).(func() ([]gobrightbox.ServerType, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []gobrightbox.ServerType); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]brightbox.ServerType) + r0 = ret.Get(0).([]gobrightbox.ServerType) } } - var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -584,20 +672,23 @@ func (_m *CloudAccess) UnMapCloudIP(identifier string) error { } // UpdateFirewallRule provides a mock function with given fields: ruleOptions -func (_m *CloudAccess) UpdateFirewallRule(ruleOptions *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error) { +func (_m *CloudAccess) UpdateFirewallRule(ruleOptions *gobrightbox.FirewallRuleOptions) (*gobrightbox.FirewallRule, error) { ret := _m.Called(ruleOptions) - var r0 *brightbox.FirewallRule - if rf, ok := ret.Get(0).(func(*brightbox.FirewallRuleOptions) *brightbox.FirewallRule); ok { + var r0 *gobrightbox.FirewallRule + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.FirewallRuleOptions) (*gobrightbox.FirewallRule, error)); ok { + return rf(ruleOptions) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.FirewallRuleOptions) *gobrightbox.FirewallRule); ok { r0 = rf(ruleOptions) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*brightbox.FirewallRule) + r0 = ret.Get(0).(*gobrightbox.FirewallRule) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.FirewallRuleOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.FirewallRuleOptions) error); ok { r1 = rf(ruleOptions) } else { r1 = ret.Error(1) @@ -607,20 +698,23 @@ func (_m *CloudAccess) UpdateFirewallRule(ruleOptions *brightbox.FirewallRuleOpt } // UpdateLoadBalancer provides a mock function with given fields: newDetails -func (_m *CloudAccess) UpdateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error) { +func (_m *CloudAccess) UpdateLoadBalancer(newDetails *gobrightbox.LoadBalancerOptions) (*gobrightbox.LoadBalancer, error) { ret := _m.Called(newDetails) - var r0 *brightbox.LoadBalancer - if rf, ok := ret.Get(0).(func(*brightbox.LoadBalancerOptions) *brightbox.LoadBalancer); ok { + var r0 *gobrightbox.LoadBalancer + var r1 error + if rf, ok := ret.Get(0).(func(*gobrightbox.LoadBalancerOptions) (*gobrightbox.LoadBalancer, error)); ok { + return rf(newDetails) + } + if rf, ok := ret.Get(0).(func(*gobrightbox.LoadBalancerOptions) *gobrightbox.LoadBalancer); ok { r0 = rf(newDetails) } else { if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*brightbox.LoadBalancer) + r0 = ret.Get(0).(*gobrightbox.LoadBalancer) } } - var r1 error - if rf, ok := ret.Get(1).(func(*brightbox.LoadBalancerOptions) error); ok { + if rf, ok := ret.Get(1).(func(*gobrightbox.LoadBalancerOptions) error); ok { r1 = rf(newDetails) } else { r1 = ret.Error(1) @@ -628,3 +722,18 @@ func (_m *CloudAccess) UpdateLoadBalancer(newDetails *brightbox.LoadBalancerOpti return r0, r1 } + +type mockConstructorTestingTNewCloudAccess interface { + mock.TestingT + Cleanup(func()) +} + +// NewCloudAccess creates a new instance of CloudAccess. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewCloudAccess(t mockConstructorTestingTNewCloudAccess) *CloudAccess { + mock := &CloudAccess{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go index 7d007363d090..27292d49e8ea 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go @@ -172,16 +172,6 @@ func normalizedProviderString(s string) normalizedProviderID { return normalizedProviderID(split[len(split)-1]) } -func scaleFromZeroAnnotationsEnabled(annotations map[string]string) bool { - cpu := annotations[cpuKey] - mem := annotations[memoryKey] - - if cpu != "" && mem != "" { - return true - } - return false -} - func parseKey(annotations map[string]string, key string) (resource.Quantity, error) { if val, exists := annotations[key]; exists && val != "" { return resource.ParseQuantity(val) diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go index ab929d8cb870..c4d01c56e365 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go @@ -430,47 +430,6 @@ func TestUtilNormalizedProviderID(t *testing.T) { } } -func TestScaleFromZeroEnabled(t *testing.T) { - for _, tc := range []struct { - description string - enabled bool - annotations map[string]string - }{{ - description: "nil annotations", - enabled: false, - }, { - description: "empty annotations", - annotations: map[string]string{}, - enabled: false, - }, { - description: "non-matching annotation", - annotations: map[string]string{"foo": "bar"}, - enabled: false, - }, { - description: "matching key, incomplete annotations", - annotations: map[string]string{ - "foo": "bar", - cpuKey: "1", - }, - enabled: false, - }, { - description: "matching key, complete annotations", - annotations: map[string]string{ - "foo": "bar", - cpuKey: "1", - memoryKey: "2Mi", - }, - enabled: true, - }} { - t.Run(tc.description, func(t *testing.T) { - got := scaleFromZeroAnnotationsEnabled(tc.annotations) - if tc.enabled != got { - t.Errorf("expected %t, got %t", tc.enabled, got) - } - }) - } -} - func TestParseCPUCapacity(t *testing.T) { for _, tc := range []struct { description string diff --git a/cluster-autoscaler/cloudprovider/gce/mig_info_provider.go b/cluster-autoscaler/cloudprovider/gce/mig_info_provider.go index 815e95b1e17d..abee9c4660ce 100644 --- a/cluster-autoscaler/cloudprovider/gce/mig_info_provider.go +++ b/cluster-autoscaler/cloudprovider/gce/mig_info_provider.go @@ -298,14 +298,29 @@ func (c *cachingMigInfoProvider) fillMigInfoCache() error { 
migs[piece], errors[piece] = c.gceClient.FetchAllMigs(zones[piece]) }) + failedZones := map[string]error{} + failedZoneCount := 0 for idx, err := range errors { if err != nil { klog.Errorf("Error listing migs from zone %v; err=%v", zones[idx], err) - return fmt.Errorf("%v", errors) + failedZones[zones[idx]] = err + failedZoneCount++ } } + if failedZoneCount > 0 && failedZoneCount == len(zones) { + return fmt.Errorf("%v", errors) + } + registeredMigRefs := c.getRegisteredMigRefs() + + for migRef := range registeredMigRefs { + err, ok := failedZones[migRef.Zone] + if ok { + c.migLister.HandleMigIssue(migRef, err) + } + } + for idx, zone := range zones { for _, zoneMig := range migs[idx] { zoneMigRef := GceRef{ diff --git a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml index 594f04e96246..d4f3368f4bca 100644 --- a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml +++ b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-config.yaml @@ -16,7 +16,7 @@ metadata: k8s-app: cluster-autoscaler rules: - apiGroups: ["storage.k8s.io"] - resources: ["csidriver", "csistoragecapacities"] + resources: ["csidrivers", "csistoragecapacities"] verbs: ["watch", "list"] - apiGroups: [""] resources: ["events", "endpoints"] diff --git a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml index 1f5e7b26e000..39b9f8f4b749 100644 --- a/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml +++ b/cluster-autoscaler/cloudprovider/oci/examples/oci-ip-cluster-autoscaler-w-principals.yaml @@ -16,7 +16,7 @@ metadata: k8s-app: cluster-autoscaler rules: - apiGroups: ["storage.k8s.io"] - resources: ["csidriver", "csistoragecapacities"] + resources: ["csidrivers", "csistoragecapacities"] verbs: ["watch", "list"] - apiGroups: [""] resources: ["events", "endpoints"] diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go index e9b6060b8bdc..e184022248b4 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go @@ -119,6 +119,9 @@ func (o *ScaleUpOrchestrator) ScaleUp( } } + // Initialise binpacking limiter. + o.processors.BinpackingLimiter.InitBinpacking(o.autoscalingContext, nodeGroups) + resourcesLeft, aErr := o.resourceManager.ResourcesLeft(o.autoscalingContext, nodeInfos, nodes) if aErr != nil { return scaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not compute total resources: ")) @@ -129,6 +132,11 @@ func (o *ScaleUpOrchestrator) ScaleUp( // Filter out invalid node groups validNodeGroups, skippedNodeGroups := o.filterValidScaleUpNodeGroups(nodeGroups, nodeInfos, resourcesLeft, now) + // Mark skipped node groups as processed. 
+ for nodegroupID := range skippedNodeGroups { + o.processors.BinpackingLimiter.MarkProcessed(o.autoscalingContext, nodegroupID) + } + // Calculate expansion options schedulablePods := map[string][]*apiv1.Pod{} var options []expander.Option @@ -139,12 +147,17 @@ func (o *ScaleUpOrchestrator) ScaleUp( for _, nodeGroup := range validNodeGroups { option := o.ComputeExpansionOption(nodeGroup, schedulablePods, nodeInfos, upcomingNodes, now) + o.processors.BinpackingLimiter.MarkProcessed(o.autoscalingContext, nodeGroup.Id()) + if len(option.Pods) == 0 || option.NodeCount == 0 { klog.V(4).Infof("No pod can fit to %s", nodeGroup.Id()) - continue + } else { + options = append(options, option) } - options = append(options, option) + if o.processors.BinpackingLimiter.StopBinpacking(o.autoscalingContext, options) { + break + } } if len(options) == 0 { @@ -224,6 +237,17 @@ func (o *ScaleUpOrchestrator) ScaleUp( // Recompute similar node groups in case they need to be updated bestOption.SimilarNodeGroups = o.ComputeSimilarNodeGroups(bestOption.NodeGroup, nodeInfos, schedulablePods, now) + if bestOption.SimilarNodeGroups != nil { + // if similar node groups are found, log about them + similarNodeGroupIds := make([]string, 0) + for _, sng := range bestOption.SimilarNodeGroups { + similarNodeGroupIds = append(similarNodeGroupIds, sng.Id()) + } + klog.V(2).Infof("Found %d similar node groups: %v", len(bestOption.SimilarNodeGroups), similarNodeGroupIds) + } else if o.autoscalingContext.BalanceSimilarNodeGroups { + // if no similar node groups are found and the flag is enabled, log about it + klog.V(2).Info("No similar node groups found") + } nodeInfo, found := nodeInfos[bestOption.NodeGroup.Id()] if !found { diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go index 90d7eeff20f7..1ca6d54e0708 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go @@ -805,6 +805,63 @@ func TestScaleUpUnhealthy(t *testing.T) { assert.False(t, scaleUpStatus.WasSuccessful()) } +func TestBinpackingLimiter(t *testing.T) { + n1 := BuildTestNode("n1", 1000, 1000) + n2 := BuildTestNode("n2", 100000, 100000) + now := time.Now() + + SetNodeReadyState(n1, true, now.Add(-2*time.Minute)) + SetNodeReadyState(n2, true, now.Add(-2*time.Minute)) + + nodes := []*apiv1.Node{n1, n2} + + podLister := kube_util.NewTestPodLister([]*apiv1.Pod{}) + listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil, nil) + + provider := testprovider.NewTestCloudProvider(func(nodeGroup string, increase int) error { + return nil + }, nil) + + options := defaultOptions + provider.AddNodeGroup("ng1", 1, 10, 1) + provider.AddNode("ng1", n1) + provider.AddNodeGroup("ng2", 1, 10, 1) + provider.AddNode("ng2", n2) + assert.NotNil(t, provider) + + context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil) + assert.NoError(t, err) + + nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false). 
+ Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + assert.NoError(t, err) + + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), clusterstate.NewStaticMaxNodeProvisionTimeProvider(15*time.Minute)) + clusterState.UpdateNodes(nodes, nodeInfos, time.Now()) + + extraPod := BuildTestPod("p-new", 500, 0) + + processors := NewTestProcessors(&context) + + // We should stop binpacking after finding expansion option from first node group. + processors.BinpackingLimiter = &MockBinpackingLimiter{} + + suOrchestrator := New() + suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{}) + + expander := NewMockRepotingStrategy(t, &GroupSizeChange{GroupName: "ng1", SizeChange: 1}) + context.ExpanderStrategy = expander + + scaleUpStatus, err := suOrchestrator.ScaleUp([]*apiv1.Pod{extraPod}, nodes, []*appsv1.DaemonSet{}, nodeInfos) + processors.ScaleUpStatusProcessor.Process(&context, scaleUpStatus) + assert.NoError(t, err) + assert.True(t, scaleUpStatus.WasSuccessful()) + + expansionOptions := expander.LastInputOptions() + // Only 1 expansion option should be there. Without BinpackingLimiter there will be 2. + assert.True(t, len(expansionOptions) == 1) +} + func TestScaleUpNoHelp(t *testing.T) { n1 := BuildTestNode("n1", 100, 1000) now := time.Now() diff --git a/cluster-autoscaler/core/test/common.go b/cluster-autoscaler/core/test/common.go index 5a84f4435be9..82e0f0f5a3ac 100644 --- a/cluster-autoscaler/core/test/common.go +++ b/cluster-autoscaler/core/test/common.go @@ -37,6 +37,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/metrics" "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/processors/actionablecluster" + "k8s.io/autoscaler/cluster-autoscaler/processors/binpacking" processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks" "k8s.io/autoscaler/cluster-autoscaler/processors/customresources" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig" @@ -167,6 +168,7 @@ func NewTestProcessors(context *context.AutoscalingContext) *processors.Autoscal return &processors.AutoscalingProcessors{ PodListProcessor: podlistprocessor.NewDefaultPodListProcessor(context.PredicateChecker), NodeGroupListProcessor: &nodegroups.NoOpNodeGroupListProcessor{}, + BinpackingLimiter: binpacking.NewDefaultBinpackingLimiter(), NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{}), ScaleDownSetProcessor: nodes.NewPostFilteringScaleDownNodeProcessor(), // TODO(bskiba): change scale up test so that this can be a NoOpProcessor @@ -329,6 +331,26 @@ func (p *MockAutoprovisioningNodeGroupListProcessor) Process(context *context.Au func (p *MockAutoprovisioningNodeGroupListProcessor) CleanUp() { } +// MockBinpackingLimiter is a fake BinpackingLimiter to be used in tests. +type MockBinpackingLimiter struct { + requiredExpansionOptions int +} + +// InitBinpacking initialises the MockBinpackingLimiter and sets requiredExpansionOptions to 1. +func (p *MockBinpackingLimiter) InitBinpacking(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup) { + p.requiredExpansionOptions = 1 +} + +// StopBinpacking stops the binpacking early, if we already have requiredExpansionOptions i.e. 1. 
+func (p *MockBinpackingLimiter) StopBinpacking(context *context.AutoscalingContext, evaluatedOptions []expander.Option) bool { + return len(evaluatedOptions) == p.requiredExpansionOptions +} + +// MarkProcessed is here to satisfy the interface. +func (p *MockBinpackingLimiter) MarkProcessed(context *context.AutoscalingContext, nodegroupId string) { + +} + // NewBackoff creates a new backoff object func NewBackoff() backoff.Backoff { return backoff.NewIdBasedExponentialBackoff(5*time.Minute, /*InitialNodeGroupBackoffDuration*/ diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index b3c85a5f0b2e..07ea5c77456a 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -4,17 +4,17 @@ go 1.20 require ( cloud.google.com/go/compute/metadata v0.2.3 - github.com/Azure/azure-sdk-for-go v67.2.0+incompatible - github.com/Azure/go-autorest/autorest v0.11.28 - github.com/Azure/go-autorest/autorest/adal v0.9.21 + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible + github.com/Azure/go-autorest/autorest v0.11.29 + github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/date v0.3.0 github.com/Azure/go-autorest/autorest/to v0.4.0 github.com/Azure/skewer v0.0.14 github.com/aws/aws-sdk-go v1.44.241 - github.com/cenkalti/backoff/v4 v4.2.0 + github.com/cenkalti/backoff/v4 v4.2.1 github.com/digitalocean/godo v1.27.0 - github.com/gofrs/uuid v4.0.0+incompatible + github.com/gofrs/uuid v4.4.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 @@ -30,23 +30,23 @@ require ( golang.org/x/crypto v0.8.0 golang.org/x/net v0.9.0 golang.org/x/oauth2 v0.7.0 - golang.org/x/sys v0.7.0 + golang.org/x/sys v0.8.0 google.golang.org/api v0.114.0 google.golang.org/grpc v1.54.0 google.golang.org/protobuf v1.30.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.27.0 - k8s.io/apimachinery v0.27.0 - k8s.io/apiserver v0.27.0 - k8s.io/client-go v0.27.0 - k8s.io/cloud-provider v0.27.0 + k8s.io/api v0.28.0-alpha.2 + k8s.io/apimachinery v0.28.0-alpha.2 + k8s.io/apiserver v0.28.0-alpha.2 + k8s.io/client-go v0.28.0-alpha.2 + k8s.io/cloud-provider v0.28.0-alpha.2 k8s.io/cloud-provider-aws v1.27.0 - k8s.io/component-base v0.27.0 - k8s.io/component-helpers v0.27.0 - k8s.io/klog/v2 v2.90.1 - k8s.io/kubelet v0.27.0 - k8s.io/kubernetes v1.27.0 + k8s.io/component-base v0.28.0-alpha.2 + k8s.io/component-helpers v0.28.0-alpha.2 + k8s.io/klog/v2 v2.100.1 + k8s.io/kubelet v0.28.0-alpha.2 + k8s.io/kubernetes v1.28.0-alpha.2 k8s.io/legacy-cloud-providers v0.0.0 k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 sigs.k8s.io/cloud-provider-azure v1.26.2 @@ -63,7 +63,7 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect - github.com/Microsoft/go-winio v0.4.17 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect github.com/Microsoft/hcsshim v0.8.25 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 // indirect @@ -73,17 +73,17 @@ require ( 
github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect - github.com/cilium/ebpf v0.7.0 // indirect - github.com/container-storage-interface/spec v1.7.0 // indirect - github.com/containerd/cgroups v1.0.1 // indirect + github.com/cilium/ebpf v0.9.1 // indirect + github.com/container-storage-interface/spec v1.8.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/console v1.0.3 // indirect - github.com/containerd/ttrpc v1.1.0 // indirect + github.com/containerd/ttrpc v1.2.2 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect @@ -96,12 +96,12 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect - github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/cadvisor v0.47.1 // indirect - github.com/google/cel-go v0.14.0 // indirect - github.com/google/gnostic v0.6.9 // indirect + github.com/google/cel-go v0.16.0 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.1 // indirect @@ -117,7 +117,6 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/ipvs v1.1.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect @@ -128,7 +127,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/runc v1.1.4 // indirect + github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect github.com/opencontainers/selinux v1.10.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -136,18 +135,18 @@ require ( github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rubiojr/go-vhd 
v0.0.0-20200706105327-02e210299021 // indirect - github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect + github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.2 // indirect + github.com/vishvananda/netns v0.0.4 // indirect github.com/vmware/govmomi v0.30.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.7 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect - go.etcd.io/etcd/client/v3 v3.5.7 // indirect + go.etcd.io/etcd/api/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/v3 v3.5.9 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 // indirect @@ -164,23 +163,26 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/mod v0.10.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/term v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/controller-manager v0.27.0 // indirect - k8s.io/cri-api v0.0.0 // indirect + k8s.io/controller-manager v0.28.0-alpha.2 // indirect + k8s.io/cri-api v0.28.0-alpha.2 // indirect k8s.io/csi-translation-lib v0.27.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect - k8s.io/kms v0.27.0 // indirect - k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c // indirect - k8s.io/kube-proxy v0.0.0 // indirect + k8s.io/kms v0.28.0-alpha.2 // indirect + k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961 // indirect k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/mount-utils v0.26.0-alpha.0 // indirect @@ -195,62 +197,62 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -replace k8s.io/api => k8s.io/api v0.27.0 +replace k8s.io/api => k8s.io/api v0.28.0-alpha.2 -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.0 +replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.0-alpha.2 -replace k8s.io/apimachinery => k8s.io/apimachinery v0.27.0 +replace k8s.io/apimachinery => k8s.io/apimachinery v0.28.0-alpha.2 -replace k8s.io/apiserver => k8s.io/apiserver v0.27.0 +replace k8s.io/apiserver 
=> k8s.io/apiserver v0.28.0-alpha.2 -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.0 +replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.0-alpha.2 -replace k8s.io/client-go => k8s.io/client-go v0.27.0 +replace k8s.io/client-go => k8s.io/client-go v0.28.0-alpha.2 -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.0 +replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.0-alpha.2 -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.0 +replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.0-alpha.2 -replace k8s.io/code-generator => k8s.io/code-generator v0.27.0 +replace k8s.io/code-generator => k8s.io/code-generator v0.28.0-alpha.2 -replace k8s.io/component-base => k8s.io/component-base v0.27.0 +replace k8s.io/component-base => k8s.io/component-base v0.28.0-alpha.2 -replace k8s.io/component-helpers => k8s.io/component-helpers v0.27.0 +replace k8s.io/component-helpers => k8s.io/component-helpers v0.28.0-alpha.2 -replace k8s.io/controller-manager => k8s.io/controller-manager v0.27.0 +replace k8s.io/controller-manager => k8s.io/controller-manager v0.28.0-alpha.2 -replace k8s.io/cri-api => k8s.io/cri-api v0.27.0 +replace k8s.io/cri-api => k8s.io/cri-api v0.28.0-alpha.2 -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.0 +replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.0-alpha.2 -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.0 +replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.0-alpha.2 -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.0 +replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.0-alpha.2 -replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.0 +replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.0-alpha.2 -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.0 +replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.0-alpha.2 -replace k8s.io/kubectl => k8s.io/kubectl v0.27.0 +replace k8s.io/kubectl => k8s.io/kubectl v0.28.0-alpha.2 -replace k8s.io/kubelet => k8s.io/kubelet v0.27.0 +replace k8s.io/kubelet => k8s.io/kubelet v0.28.0-alpha.2 -replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.0 +replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.0-alpha.2 -replace k8s.io/metrics => k8s.io/metrics v0.27.0 +replace k8s.io/metrics => k8s.io/metrics v0.28.0-alpha.2 -replace k8s.io/mount-utils => k8s.io/mount-utils v0.27.0 +replace k8s.io/mount-utils => k8s.io/mount-utils v0.28.0-alpha.2 -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.0 +replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.0-alpha.2 -replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.0 +replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.0-alpha.2 -replace k8s.io/sample-controller => k8s.io/sample-controller v0.27.0 +replace k8s.io/sample-controller => k8s.io/sample-controller v0.28.0-alpha.2 -replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.0 +replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.0-alpha.2 -replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.0 +replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.0-alpha.2 -replace k8s.io/kms => k8s.io/kms v0.27.0 +replace k8s.io/kms => k8s.io/kms v0.28.0-alpha.2 replace k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0 
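For readers unfamiliar with the pattern above: the Kubernetes staging modules are required at placeholder versions and only pinned to real releases through `replace` directives, which is why every directive moves to `v0.28.0-alpha.2` in lockstep. A minimal sketch of the mechanism, with one module standing in for the full set (not the complete file):

```
require k8s.io/kube-scheduler v0.0.0 // placeholder; unresolvable on its own

// The replace directive supplies the real version. Leaving any staging module
// behind while k8s.io/kubernetes moves to v1.28.0-alpha.2 breaks the build,
// so all of them are bumped together.
replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.0-alpha.2
```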
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index 0c3e9e4fff4a..9013f84ffde3 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -51,22 +51,22 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v46.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k= -github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= @@ -96,8 +96,9 @@ 
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f1181 github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Microsoft/hcsshim v0.8.25 h1:fRMwXiwk3qDwc0P05eHnh+y2v07JdtsfQ1fuAc69m9g= github.com/Microsoft/hcsshim v0.8.25/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= @@ -129,10 +130,9 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -145,8 +145,9 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -157,10 +158,11 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/container-storage-interface/spec v1.7.0 h1:gW8eyFQUZWWrMWa8p1seJ28gwDoN5CVJ4uAbQ+Hdycw= -github.com/container-storage-interface/spec v1.7.0/go.mod h1:JYuzLqr9VVNoDJl44xp/8fmCOvWPDKzuGTwCoklhuqk= -github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= +github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= @@ -169,8 +171,9 @@ github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= +github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -202,8 +205,9 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -211,9 +215,8 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -232,10 +235,9 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= @@ -266,22 +268,21 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -326,10 +327,10 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/cadvisor v0.47.1 h1:YyKnRy/3myRNGOvF1bNF9FFnpjY7Gky5yKi/ZlN+BSo= github.com/google/cadvisor v0.47.1/go.mod h1:iJdTjcjyKHjLCf7OSTzwP5GxdfrkPusw2x5bwGvuLUw= -github.com/google/cel-go v0.14.0 h1:LFobwuUDslWUHdQ48SXVXvQgPH2X1XVhsgOGNioAEZ4= -github.com/google/cel-go v0.14.0/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/cel-go v0.16.0 h1:DG9YQ8nFCFXAs/FDDwBxmL1tpKNrdlGUM9U3537bX/Y= +github.com/google/cel-go v0.16.0/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -431,7 +432,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences 
v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -454,8 +454,6 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -483,14 +481,15 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= -github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= +github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -541,15 +540,16 @@ 
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= +github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -573,7 +573,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -602,13 +601,10 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXnNI= -github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= 
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA= github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -619,17 +615,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= -go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= -go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= -go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= -go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= -go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= -go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= -go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= -go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= -go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= +go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= +go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= +go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= +go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= +go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -684,6 +680,7 @@ golang.org/x/crypto 
v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -724,6 +721,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -765,13 +764,13 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -881,14 +880,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -902,6 +902,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -965,7 +966,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1068,10 +1070,13 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= 
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1148,7 +1153,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1160,52 +1164,50 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.0 h1:2owttiA8Oa+J3idFeq8TSnNpm4y6AOGPI3PDbIpp2cE= -k8s.io/api v0.27.0/go.mod h1:Wl+QRvQlh+T8SK5f4F6YBhhyH6hrFO08nl74xZb1MUE= -k8s.io/apimachinery v0.27.0 h1:vEyy/PVMbPMCPutrssCVHCf0JNZ0Px+YqPi82K2ALlk= -k8s.io/apimachinery v0.27.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= -k8s.io/apiserver v0.27.0 h1:sXt/2yVMebZef6GqJHs4IYHSdSYwwrJCafBV/KSCwDw= -k8s.io/apiserver v0.27.0/go.mod h1:8heEJ5f6EqiKwXC3Ez3ikgOvGtRSEQG/SQZkhO9UzIg= -k8s.io/client-go v0.27.0 h1:DyZS1fJkv73tEy7rWv4VF6NwGeJ7SKvNaLRXZBYLA+4= -k8s.io/client-go v0.27.0/go.mod h1:XVEmpNnM+4JYO3EENoFV/ZDv3KxKVJUnzGo70avk+C4= -k8s.io/cloud-provider v0.27.0 h1:UWEvGvfd9VDRSrtmek7dDeHfUUtycHyvIO6TGI9bFJE= -k8s.io/cloud-provider v0.27.0/go.mod h1:hUbqXpAWGaOTUhwL5k2QO9i2l9mEMhdMV9ChbvB3Gmw= +k8s.io/api v0.28.0-alpha.2 h1:TaoA5Bs2T84CStlFfyP9qQCCcc7neQdtU0gDAQHVUJA= +k8s.io/api v0.28.0-alpha.2/go.mod h1:RRsSimj8xj0SKXJz1V4opuv7jVz8AiYJT2PaxdXXhvo= +k8s.io/apimachinery v0.28.0-alpha.2 h1:7sc+ANLagRZFkNvfB9a32ZyjNnGDUOPW1lfybfvypLg= +k8s.io/apimachinery v0.28.0-alpha.2/go.mod h1:yLwDAEEdvM6sn/hdHLSdNPaP+w2iEDBApMp2VMR2Q14= +k8s.io/apiserver v0.28.0-alpha.2 h1:HEXXJUC4L2NfRkikFfms/veSYMo+2bFEgH2mK4T+J7g= +k8s.io/apiserver v0.28.0-alpha.2/go.mod h1:bTNlWcXYZr1ahStIIa0mxpDfh0Wn6bUAyqBW3ZHMoYA= +k8s.io/client-go v0.28.0-alpha.2 
h1:Rheb29eRyFmcz7wtk17zVeaM2F7I8aO3wRbuMVydMb0= +k8s.io/client-go v0.28.0-alpha.2/go.mod h1:1JNmAg+bItB9F82fAkt71E42k+yTaH3a+rJ0ggd9NqA= +k8s.io/cloud-provider v0.28.0-alpha.2 h1:+PzKM3zwCEBro3yizGaaQvuPWxG9xT5SOnojgX/dC90= +k8s.io/cloud-provider v0.28.0-alpha.2/go.mod h1:IrJlWX2dAe5TK3p19gk18EpwthPPzeuc9clyHKkeCr4= k8s.io/cloud-provider-aws v1.27.0 h1:PF8YrH8QcN6JoXB3Xxlaz84SBDYMPunJuCc0cPuCWXA= k8s.io/cloud-provider-aws v1.27.0/go.mod h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA= -k8s.io/component-base v0.27.0 h1:g3/FkscH8Uqg9SiDCEfhfhTVwKiVo4T2+iBwUqiFkMg= -k8s.io/component-base v0.27.0/go.mod h1:PXyBQd/vYYjqqGB83rnsHffTTG6zlmxZAd0ZSOu6evk= -k8s.io/component-helpers v0.27.0 h1:rymQGJc4s30hHeb5VGuPdht8gKIPecj+Bw2FOJSavE4= -k8s.io/component-helpers v0.27.0/go.mod h1:vMjVwym/Y0BVyNvg8a4Et2vyPJAh/JhBM0OTRAt0Ceg= -k8s.io/controller-manager v0.27.0 h1:xW0V4tXJfxRmc5OEwZn0GHU0auKySRJmlVMS/tqrWPw= -k8s.io/controller-manager v0.27.0/go.mod h1:E9SEe60LMWkBTe7IUm1pVTrikc5tjzEl6RUNbBUdm3c= -k8s.io/cri-api v0.27.0 h1:kaOF4faxjECAGu0+ApMt2zHfW7Z7mLu0GWFtaU4fN5E= -k8s.io/cri-api v0.27.0/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= -k8s.io/csi-translation-lib v0.27.0 h1:1tv+MhNxJRFCmexqoSYXks4N/4zikOrgyLpYF63lXzo= -k8s.io/csi-translation-lib v0.27.0/go.mod h1:ZOPmKWI/2Ad2GRTIBXzOyX52NmtTcDTsh4GWqoHvHVA= -k8s.io/dynamic-resource-allocation v0.27.0 h1:MEKgqXhsxN7jJgcyVzsSlGIJ0ryLnj5DalNz+rP50zY= -k8s.io/dynamic-resource-allocation v0.27.0/go.mod h1:a/1QBgAWnYHP/4/JTuITLrdOWfiuhkRX3mw3ijFYuTo= +k8s.io/component-base v0.28.0-alpha.2 h1:a4CvWXg/CbPGb1Zq5EuwLVku+wQD5R9hqbApnuqMwao= +k8s.io/component-base v0.28.0-alpha.2/go.mod h1:Td23IK45CodVG8T80LDJAuahBC/CeVPHR815H5In7ss= +k8s.io/component-helpers v0.28.0-alpha.2 h1:xLLk5iyjepjTAAp0W0IcskoOkEUWKAOqoB+CTITqaNY= +k8s.io/component-helpers v0.28.0-alpha.2/go.mod h1:ATp82K1HBjcnqapnmJJ3+GNdvrGs2oOwY2iLCUQYV88= +k8s.io/controller-manager v0.28.0-alpha.2 h1:/dZM0NjwQFbk8jDZ7kABc4LEOgVY5z7KXUvvX65enV0= +k8s.io/controller-manager v0.28.0-alpha.2/go.mod h1:+qpPUtE1wTR62aoTwINx6nnPzv4xnF865xxwg+yJ+KQ= +k8s.io/cri-api v0.28.0-alpha.2 h1:tI8JjaLvhzFfT3wZonasxAAy0yodJIJbARtDZzzk4aY= +k8s.io/cri-api v0.28.0-alpha.2/go.mod h1:8Ac6jtKOYm8riUDQw8UfvYr5aWyZBgLoyRiJluUTij4= +k8s.io/csi-translation-lib v0.28.0-alpha.2 h1://OXUqIRDdz5GyASJG+Eu3x8fM73VtZet97UjGTitC0= +k8s.io/csi-translation-lib v0.28.0-alpha.2/go.mod h1:guloIIs9eenYDW0/MtNh/5qHwAyP73iWuoedaGTxcUM= +k8s.io/dynamic-resource-allocation v0.28.0-alpha.2 h1:m87YYKLNHoqMTUDjsPC+lbW1fbQgwJhX2jgfQ/WBnTA= +k8s.io/dynamic-resource-allocation v0.28.0-alpha.2/go.mod h1:oIqFvDEu/Ug4mMKfzF0vTQ5TQKyIrUJKYmAoA9dfQik= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.27.0 h1:adCotKQybOjxwbxW7ogXyv8uQGan/3Y126S2aNW4YFY= -k8s.io/kms v0.27.0/go.mod h1:vI2R4Nhw+PZ+DYtVPVYKsIqip2IYjZWK9bESR64WdIw= -k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c h1:EFfsozyzZ/pggw5qNx7ftTVZdp7WZl+3ih89GEjYEK8= -k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kube-proxy v0.27.0 h1:agah2aBnBjFsoZsBxPyYU5+0BOpC7P9yxVPr/U0gyCE= -k8s.io/kube-proxy v0.27.0/go.mod h1:4wF+C295vB266aepEH+6pCJjMuOd6fv0YgdFgJjH3gY= -k8s.io/kube-scheduler v0.27.0 h1:ea3alhkFy82Kc85X5NKaQz0dftA+c1c5o/oHOkMrpBA= 
-k8s.io/kube-scheduler v0.27.0/go.mod h1:J6qRR7a4UIdWb3fhCSfRYae79xqg3KVDqx26MHzNkqY= -k8s.io/kubectl v0.27.0 h1:ZcWS6ufixDXwovWtzF149gd5GzxdpsIl4YqfioSkq5w= -k8s.io/kubectl v0.27.0/go.mod h1:tyFzo+6WfbUEccm8rFIliQ79FAmm9uTFN+1oC5Ytamo= -k8s.io/kubelet v0.27.0 h1:zn70SDJKNmRSFG2qeU2UITzZWdEbLVWIf/u1kd1raUQ= -k8s.io/kubelet v0.27.0/go.mod h1:Z6ipUvM0AFzUWxvSmot8OodwcMN15lgkFM3bcBexBsI= -k8s.io/kubernetes v1.27.0 h1:VCI2Qoksx2cv6mHu9g9KVH30ZHNtWSB/+9BtKLSqduM= -k8s.io/kubernetes v1.27.0/go.mod h1:TTwPjSCKQ+a/NTiFKRGjvOnEaQL8wIG40nsYH8Er4bA= -k8s.io/legacy-cloud-providers v0.27.0 h1:E8iZLBHTbSoxOXXDpo7a+DbAmzN6XqJSuFkY62kqLLU= -k8s.io/legacy-cloud-providers v0.27.0/go.mod h1:bIGXGgMpIWlwZ0odAs1dkSSN7v0L6byUf8e7flwe54I= -k8s.io/mount-utils v0.27.0 h1:hLyzqhLYjIBI1W+6VklbmE6rUOjbYDFdjhhc5q17Vxw= -k8s.io/mount-utils v0.27.0/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kms v0.28.0-alpha.2 h1:6ILRInn+MXy1wlCxiESbL4Sb0VWlV8mTornpc4uAIbM= +k8s.io/kms v0.28.0-alpha.2/go.mod h1:85pUi14WT49ie7EdZogRefx28APnyjLumfqR5EaRKGg= +k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961 h1:pqRVJGQJz6oeZby8qmPKXYIBjyrcv7EHCe/33UkZMYA= +k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961/go.mod h1:l8HTwL5fqnlns4jOveW1L75eo7R9KFHxiE0bsPGy428= +k8s.io/kube-scheduler v0.28.0-alpha.2 h1:XdUpeM+1Dx98HGk8FxaQlTZ3PK2hq4ezcjPJWn8VMeI= +k8s.io/kube-scheduler v0.28.0-alpha.2/go.mod h1:Q9Nk+RtsgL7VrDGYTFEk8r1ZpAdO5NYc5ONarwkKveM= +k8s.io/kubectl v0.28.0-alpha.2 h1:+He7BBHbmNBx3qCEPw3zsjIxQjnQ94edgd4bXPI1m+4= +k8s.io/kubectl v0.28.0-alpha.2/go.mod h1:Su4vnQ/r+UfOLDrJwLXVaEsS0kxLRTwHJW+0dxSG0Js= +k8s.io/kubelet v0.28.0-alpha.2 h1:1uMi5/nK74VRxB5ezL1XMi1SfKp+GRJqjWeJnFeBYi8= +k8s.io/kubelet v0.28.0-alpha.2/go.mod h1:UgEMQRgKCQoivBaLxZOk6ScHBweWGeYoquYzCj4iAoU= +k8s.io/kubernetes v1.28.0-alpha.2 h1:+j1kznKMXULWEdR7imNb7SKHsXsyAW2o/3L4QzrE8p8= +k8s.io/kubernetes v1.28.0-alpha.2/go.mod h1:7mUnouo88nuXIS2FZCrhXAUh/46RQAetiswNRACxVjU= +k8s.io/legacy-cloud-providers v0.28.0-alpha.2 h1:I3wUVBLOXproy/e/PV5xEMPwVz8ZFn5b+UzEBXcVvA8= +k8s.io/legacy-cloud-providers v0.28.0-alpha.2/go.mod h1:h++GawgCQXzXo56u4tEVXwia6Fcra7h9P05e5MDb8MY= +k8s.io/mount-utils v0.28.0-alpha.2 h1:LtFsD1z44DOtr6JXgtFiIyLLz6tin0gX+A/7mbPlY8Y= +k8s.io/mount-utils v0.28.0-alpha.2/go.mod h1:weYICrMzTxwFu7R9SJWvChKlupUZu1aucu0lpOPQ4tk= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/cluster-autoscaler/processors/binpacking/binpacking_limiter.go b/cluster-autoscaler/processors/binpacking/binpacking_limiter.go new file mode 100644 index 000000000000..3b3ab61ba3f1 --- /dev/null +++ b/cluster-autoscaler/processors/binpacking/binpacking_limiter.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package binpacking
+
+import (
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/expander"
+)
+
+// BinpackingLimiter processes expansion options to stop binpacking early.
+type BinpackingLimiter interface {
+	InitBinpacking(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup)
+	StopBinpacking(context *context.AutoscalingContext, evaluatedOptions []expander.Option) bool
+	MarkProcessed(context *context.AutoscalingContext, nodegroupId string)
+}
+
+// NoOpBinpackingLimiter is the default implementation; it never stops binpacking early.
+type NoOpBinpackingLimiter struct {
+}
+
+// NewDefaultBinpackingLimiter creates an instance of NoOpBinpackingLimiter.
+func NewDefaultBinpackingLimiter() BinpackingLimiter {
+	return &NoOpBinpackingLimiter{}
+}
+
+// InitBinpacking initialises the BinpackingLimiter.
+func (p *NoOpBinpackingLimiter) InitBinpacking(context *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup) {
+}
+
+// StopBinpacking is used to make decisions on the evaluated expansion options; this no-op implementation always returns false.
+func (p *NoOpBinpackingLimiter) StopBinpacking(context *context.AutoscalingContext, evaluatedOptions []expander.Option) bool {
+	return false
+}
+
+// MarkProcessed marks the nodegroup as processed.
+func (p *NoOpBinpackingLimiter) MarkProcessed(context *context.AutoscalingContext, nodegroupId string) {
+}
diff --git a/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go
index 42dbb9ef8651..5d96ce777f26 100644
--- a/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go
+++ b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
+	klog "k8s.io/klog/v2"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 )
@@ -122,11 +123,13 @@ func IsCloudProviderNodeInfoSimilar(
 	for kind, qtyList := range capacity {
 		if len(qtyList) != 2 {
+			klog.V(3).Infof("nodes %s and %s are not similar, missing capacity %s", n1.Node().Name, n2.Node().Name, kind)
 			return false
 		}
 		switch kind {
 		case apiv1.ResourceMemory:
 			if !resourceListWithinTolerance(qtyList, ratioOpts.MaxCapacityMemoryDifferenceRatio) {
+				klog.V(3).Infof("nodes %s and %s are not similar, memory not within tolerance", n1.Node().Name, n2.Node().Name)
 				return false
 			}
 		default:
@@ -134,6 +137,7 @@ func IsCloudProviderNodeInfoSimilar(
 			// If this is ever changed, enforcing MaxCoresTotal limits
 			// as it is now may no longer work.
 			if qtyList[0].Cmp(qtyList[1]) != 0 {
+				klog.V(3).Infof("nodes %s and %s are not similar, %s does not match", n1.Node().Name, n2.Node().Name, kind)
 				return false
 			}
 		}
@@ -141,13 +145,16 @@ func IsCloudProviderNodeInfoSimilar(
 	// For allocatable and free we allow resource quantities to be within a few % of each other
 	if !resourceMapsWithinTolerance(allocatable, ratioOpts.MaxAllocatableDifferenceRatio) {
+		klog.V(3).Infof("nodes %s and %s are not similar, allocatable resources not within tolerance", n1.Node().Name, n2.Node().Name)
 		return false
 	}
 	if !resourceMapsWithinTolerance(free, ratioOpts.MaxFreeDifferenceRatio) {
+		klog.V(3).Infof("nodes %s and %s are not similar, free resources not within tolerance", n1.Node().Name, n2.Node().Name)
 		return false
 	}
 	if !compareLabels(nodes, ignoredLabels) {
+		klog.V(3).Infof("nodes %s and %s are not similar, labels do not match", n1.Node().Name, n2.Node().Name)
 		return false
 	}
diff --git a/cluster-autoscaler/processors/processors.go b/cluster-autoscaler/processors/processors.go
index 78cc56ea13f9..b2afda6481b8 100644
--- a/cluster-autoscaler/processors/processors.go
+++ b/cluster-autoscaler/processors/processors.go
@@ -19,6 +19,7 @@ package processors
 import (
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/actionablecluster"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/binpacking"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/customresources"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
@@ -38,6 +39,8 @@ type AutoscalingProcessors struct {
 	PodListProcessor pods.PodListProcessor
 	// NodeGroupListProcessor is used to process list of NodeGroups that can be used in scale-up.
 	NodeGroupListProcessor nodegroups.NodeGroupListProcessor
+	// BinpackingLimiter processes expansion options to stop binpacking early.
+	BinpackingLimiter binpacking.BinpackingLimiter
 	// NodeGroupSetProcessor is used to divide scale-up between similar NodeGroups.
 	NodeGroupSetProcessor nodegroupset.NodeGroupSetProcessor
 	// ScaleUpStatusProcessor is used to process the state of the cluster after a scale-up.
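As context for the processor wired in by the hunk that follows, here is a minimal sketch of a custom `BinpackingLimiter` satisfying the interface added in `binpacking_limiter.go` above. The type name and option cap are hypothetical illustrations, not part of this change: the shipped no-op never stops early, while a limiter like this would cut the scale-up loop short once enough expansion options have been evaluated.

```go
package binpacking

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/expander"
)

// maxOptionsLimiter (hypothetical) stops binpacking once a fixed number of
// expansion options has been evaluated in a single scale-up loop.
type maxOptionsLimiter struct {
	maxOptions int
}

// InitBinpacking has no per-loop state to reset in this sketch; a stateful
// limiter could snapshot data from the context or the node group list here.
func (l *maxOptionsLimiter) InitBinpacking(ctx *context.AutoscalingContext, nodeGroups []cloudprovider.NodeGroup) {
}

// StopBinpacking ends binpacking early once enough options have been evaluated.
func (l *maxOptionsLimiter) StopBinpacking(ctx *context.AutoscalingContext, evaluatedOptions []expander.Option) bool {
	return len(evaluatedOptions) >= l.maxOptions
}

// MarkProcessed is a no-op here; an implementation tracking per-node-group
// state would record nodegroupId.
func (l *maxOptionsLimiter) MarkProcessed(ctx *context.AutoscalingContext, nodegroupId string) {
}
```

A caller would install it by overriding the default after construction, e.g. `p := DefaultProcessors(); p.BinpackingLimiter = &maxOptionsLimiter{maxOptions: 10}`, mirroring how `NewDefaultBinpackingLimiter()` is slotted in below.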
@@ -71,6 +74,7 @@ func DefaultProcessors() *AutoscalingProcessors { return &AutoscalingProcessors{ PodListProcessor: pods.NewDefaultPodListProcessor(), NodeGroupListProcessor: nodegroups.NewDefaultNodeGroupListProcessor(), + BinpackingLimiter: binpacking.NewDefaultBinpackingLimiter(), NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{ MaxAllocatableDifferenceRatio: config.DefaultMaxAllocatableDifferenceRatio, MaxCapacityMemoryDifferenceRatio: config.DefaultMaxCapacityMemoryDifferenceRatio, diff --git a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go index e6c486f82f68..fde1d2758a78 100644 --- a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go +++ b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go @@ -56,9 +56,9 @@ func NewSchedulerBasedPredicateChecker(kubeClient kube_client.Interface, stop <- sharedLister := NewDelegatingSchedulerSharedLister() framework, err := schedulerframeworkruntime.NewFramework( + context.TODO(), scheduler_plugins.NewInTreeRegistry(), &config.Profiles[0], - stop, schedulerframeworkruntime.WithInformerFactory(informerFactory), schedulerframeworkruntime.WithSnapshotSharedLister(sharedLister), ) diff --git a/cluster-autoscaler/utils/utils.go b/cluster-autoscaler/utils/utils.go index 70444651e8e2..19e017a116e9 100644 --- a/cluster-autoscaler/utils/utils.go +++ b/cluster-autoscaler/utils/utils.go @@ -94,6 +94,16 @@ func dropProjectedVolumesAndMounts(podSpec *apiv1.PodSpec) { } podSpec.Containers[i].VolumeMounts = volumeMounts } + + for i := range podSpec.InitContainers { + var volumeMounts []apiv1.VolumeMount + for _, mount := range podSpec.InitContainers[i].VolumeMounts { + if ok := projectedVolumeNames[mount.Name]; !ok { + volumeMounts = append(volumeMounts, mount) + } + } + podSpec.InitContainers[i].VolumeMounts = volumeMounts + } } func dropHostname(podSpec *apiv1.PodSpec) { diff --git a/cluster-autoscaler/utils/utils_test.go b/cluster-autoscaler/utils/utils_test.go index b90fa4f64b9b..c7046e1b39bb 100644 --- a/cluster-autoscaler/utils/utils_test.go +++ b/cluster-autoscaler/utils/utils_test.go @@ -111,6 +111,8 @@ func TestSanitizePodSpec(t *testing.T) { }, Containers: []apiv1.Container{ {Image: "foo/bar", Name: "foobar", VolumeMounts: []apiv1.VolumeMount{{Name: "projected1"}}}, + }, + InitContainers: []apiv1.Container{ {Image: "foo/baz", Name: "foobaz", VolumeMounts: []apiv1.VolumeMount{{Name: "projected2"}}}, }, }, @@ -118,6 +120,8 @@ func TestSanitizePodSpec(t *testing.T) { NodeSelector: map[string]string{"foo": "bar"}, Containers: []apiv1.Container{ {Image: "foo/bar", Name: "foobar"}, + }, + InitContainers: []apiv1.Container{ {Image: "foo/baz", Name: "foobaz"}, }, }, diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index 2dde094414cf..bcfbb15cce0d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -4,4 +4,4 @@ package version // Licensed under the MIT License. See License.txt in the project root for license information. // Number contains the semantic version of this SDK. 
-const Number = "v67.2.0" +const Number = "v68.0.0" diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 1a9c8ab537f0..2a24ab80cf16 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -127,6 +127,9 @@ type TokenRefreshCallback func(Token) error // TokenRefresh is a type representing a custom callback to refresh a token type TokenRefresh func(ctx context.Context, resource string) (*Token, error) +// JWTCallback is the type representing callback that will be called to get the federated OIDC JWT +type JWTCallback func() (string, error) + // Token encapsulates the access token used to authorize Azure requests. // https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response type Token struct { @@ -177,7 +180,7 @@ func (t Token) WillExpireIn(d time.Duration) bool { return !t.Expires().After(time.Now().Add(d)) } -//OAuthToken return the current access token +// OAuthToken return the current access token func (t *Token) OAuthToken() string { return t.AccessToken } @@ -367,14 +370,18 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err // ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. type ServicePrincipalFederatedSecret struct { - jwt string + jwtCallback JWTCallback } // SetAuthenticationValues is a method of the interface ServicePrincipalSecret. // It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. -func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(_ *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.jwtCallback() + if err != nil { + return err + } - v.Set("client_assertion", secret.jwt) + v.Set("client_assertion", jwt) v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") return nil } @@ -687,6 +694,8 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie } // NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT. +// +// Deprecated: Use NewServicePrincipalTokenFromFederatedTokenWithCallback to refresh jwt dynamically. func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { if err := validateOAuthConfig(oauthConfig); err != nil { return nil, err @@ -700,12 +709,37 @@ func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientI if jwt == "" { return nil, fmt.Errorf("parameter 'jwt' cannot be empty") } + return NewServicePrincipalTokenFromFederatedTokenCallback( + oauthConfig, + clientID, + func() (string, error) { + return jwt, nil + }, + resource, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromFederatedTokenCallback creates a ServicePrincipalToken from the supplied federated OIDC JWTCallback. 
+func NewServicePrincipalTokenFromFederatedTokenCallback(oauthConfig OAuthConfig, clientID string, jwtCallback JWTCallback, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if jwtCallback == nil { + return nil, fmt.Errorf("parameter 'jwtCallback' cannot be empty") + } return NewServicePrincipalTokenWithSecret( oauthConfig, clientID, resource, &ServicePrincipalFederatedSecret{ - jwt: jwt, + jwtCallback: jwtCallback, }, callbacks..., ) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/autorest.go index aafdf021fd6f..211c98d1ed04 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -6,33 +6,33 @@ generated Go code. The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, and Responding. A typical pattern is: - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) Each phase relies on decorators to modify and / or manage processing. Decorators may first modify and then pass the data along, pass the data first and then modify the result, or wrap themselves around passing the data (such as a logger might do). Decorators run in the order provided. For example, the following: - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) will set the URL to: - https://microsoft.com/a/b/c + https://microsoft.com/a/b/c Preparers and Responders may be shared and re-used (assuming the underlying decorators support sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 1328f1764c23..868345db6868 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -214,7 +214,7 @@ func (r Resource) String() string { // See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid. 
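A usage sketch for the callback-based constructor added above: the callback re-reads a projected token file on every refresh, so a rotated federated credential is picked up without restarting the process. The token file path is an assumption (a common workload-identity convention), not something this change prescribes:

```go
package example

import (
	"fmt"
	"os"

	"github.com/Azure/go-autorest/autorest/adal"
)

func newFederatedToken(cfg adal.OAuthConfig, clientID, resource string) (*adal.ServicePrincipalToken, error) {
	jwtCallback := func() (string, error) {
		// Re-read the projected token so rotation takes effect on refresh.
		b, err := os.ReadFile("/var/run/secrets/azure/tokens/azure-identity-token")
		if err != nil {
			return "", fmt.Errorf("reading federated token: %w", err)
		}
		return string(b), nil
	}
	return adal.NewServicePrincipalTokenFromFederatedTokenCallback(cfg, clientID, jwtCallback, resource)
}
```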
func ParseResourceID(resourceID string) (Resource, error) { - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$` resourceIDPattern := regexp.MustCompile(resourceIDPatternText) match := resourceIDPattern.FindStringSubmatch(resourceID) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/utility.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/utility.go index 3467b8fa6043..d35b3850ab34 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -60,9 +60,9 @@ func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { // is especially useful if there is a chance the data will fail to decode. // encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v // is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) { + err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) + return } // TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitattributes b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitattributes new file mode 100644 index 000000000000..94f480de94e1 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitignore b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitignore index b883f1fdc6d6..815e20660e5d 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitignore +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.gitignore @@ -1 +1,10 @@ +.vscode/ + *.exe + +# testing +testdata + +# go workspaces +go.work +go.work.sum diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.golangci.yml b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.golangci.yml new file mode 100644 index 000000000000..af403bb13a08 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -0,0 +1,144 @@ +run: + skip-dirs: + - pkg/etw/sample + +linters: + enable: + # style + - containedctx # struct contains a context + - dupl # duplicate code + - errname # erorrs are named correctly + - goconst # strings that should be constants + - godot # comments end in a period + - misspell + - nolintlint # "//nolint" directives are properly explained + - revive # golint replacement + - stylecheck # golint replacement, less configurable than revive + - unconvert # unnecessary conversions + - wastedassign + + # bugs, performance, unused, etc ... 
+ - contextcheck # function uses a non-inherited context + - errorlint # errors not wrapped for 1.13 + - exhaustive # check exhaustiveness of enum switch statements + - gofmt # files are gofmt'ed + - gosec # security + - nestif # deeply nested ifs + - nilerr # returns nil even with non-nil error + - prealloc # slices that can be pre-allocated + - structcheck # unused struct fields + - unparam # unused function params + +issues: + exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + + # ignore long lines for skip autogen directives + - linters: + - revive + text: "^line-length-limit: " + source: "^//(go:generate|sys) " + + # allow unjustified ignores of error checks in defer statements + - linters: + - nolintlint + text: "^directive `//nolint:errcheck` should provide explanation" + source: '^\s*defer ' + + # allow unjustified ignores of error lints for io.EOF + - linters: + - nolintlint + text: "^directive `//nolint:errorlint` should provide explanation" + source: '[=|!]= io.EOF' + + +linters-settings: + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + check-shadowing: true + nolintlint: + allow-leading-space: false + require-explanation: true + require-specific: true + revive: + # revive is more configurable than static check, so likely the preferred alternative to static-check + # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) + enable-all-rules: + true + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + rules: + # rules with required arguments + - name: argument-limit + disabled: true + - name: banned-characters + disabled: true + - name: cognitive-complexity + disabled: true + - name: cyclomatic + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: max-public-structs + disabled: true + # geneally annoying rules + - name: add-constant # complains about any and all strings and integers + disabled: true + - name: confusing-naming # we frequently use "Foo()" and "foo()" together + disabled: true + - name: flag-parameter # excessive, and a common idiom we use + disabled: true + # general config + - name: line-length-limit + arguments: + - 140 + - name: var-naming + arguments: + - [] + - - CID + - CRI + - CTRD + - DACL + - DLL + - DOS + - ETW + - FSCTL + - GCS + - GMSA + - HCS + - HV + - IO + - LCOW + - LDAP + - LPAC + - LTSC + - MMIO + - NT + - OCI + - PMEM + - PWSH + - RX + - SACl + - SID + - SMB + - TX + - VHD + - VHDX + - VMID + - VPCI + - WCOW + - WIM + stylecheck: + checks: + - "all" + - "-ST1003" # use revive's var naming diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/README.md b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/README.md index 5680010575b7..7474b4f0b653 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/README.md +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/README.md @@ -1,4 +1,4 @@ -# go-winio +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) This repository contains utilities for efficiently performing Win32 IO operations in Go. 
Currently, this is focused on accessing named pipes and other file handles, and
@@ -11,12 +11,79 @@ package.
 
 Please see the LICENSE file for licensing information.
 
-This project has adopted the [Microsoft Open Source Code of
-Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
-see the [Code of Conduct
-FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
-[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
-questions or comments.
+## Contributing
 
-Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
-for another named pipe implementation.
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+merging.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the DCO GitHub app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+    "go.lintTool": "golangci-lint",
+    "go.lintOnSave": "package",
+```
+
+Additional editor [integration options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+ +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff + +[natefinch]: https://github.com/natefinch diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/SECURITY.md b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/SECURITY.md new file mode 100644 index 000000000000..869fdfe2b246 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/backup.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/backup.go index 2be34af43106..09621c88463a 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/backup.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/backup.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -7,11 +8,12 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "runtime" "syscall" "unicode/utf16" + + "golang.org/x/sys/windows" ) //sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead @@ -24,7 +26,7 @@ const ( BackupAlternateData BackupLink BackupPropertyData - BackupObjectId + BackupObjectId //revive:disable-line:var-naming ID, not Id BackupReparseData BackupSparseBlock BackupTxfsData @@ -34,14 +36,16 @@ const ( StreamSparseAttributes = uint32(8) ) +//nolint:revive // var-naming: ALL_CAPS const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY ) // BackupHeader represents a backup stream of a file. type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id Id uint32 // The backup stream ID Attributes uint32 // Stream attributes Size int64 // The size of the stream in bytes @@ -49,8 +53,8 @@ type BackupHeader struct { Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). } -type win32StreamId struct { - StreamId uint32 +type win32StreamID struct { + StreamID uint32 Attributes uint32 Size uint64 NameSize uint32 @@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader { // Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if // it was not completely read. func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this if s, ok := r.r.(io.Seeker); ok { // Make sure Seek on io.SeekCurrent sometimes succeeds // before trying the actual seek. 
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { r.bytesLeft = 0 } } - if _, err := io.Copy(ioutil.Discard, r); err != nil { + if _, err := io.Copy(io.Discard, r); err != nil { return nil, err } } - var wsi win32StreamId + var wsi win32StreamID if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { return nil, err } hdr := &BackupHeader{ - Id: wsi.StreamId, + Id: wsi.StreamID, Attributes: wsi.Attributes, Size: int64(wsi.Size), } @@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { } hdr.Name = syscall.UTF16ToString(name) } - if wsi.StreamId == BackupSparseBlock { + if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { return nil, err } @@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { return fmt.Errorf("missing %d bytes", w.bytesLeft) } name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, + wsi := win32StreamID{ + StreamID: hdr.Id, Attributes: hdr.Attributes, Size: uint64(hdr.Size), NameSize: uint32(len(name) * 2), @@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } runtime.KeepAlive(r.f) if bytesRead == 0 { @@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } runtime.KeepAlive(w.f) if int(bytesWritten) != len(b) { @@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32) if err != nil { return nil, err } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + h, err := syscall.CreateFile(&winPath[0], + access, + share, + nil, + createmode, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, + 0) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/doc.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/doc.go new file mode 100644 index 000000000000..1f5bfe2d548e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/doc.go @@ -0,0 +1,22 @@ +// This package provides utilities for efficiently performing Win32 IO operations in Go. 
+// Currently, this package provides support for general IO and management of
+// - named pipes
+// - files
+// - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+// - creating and managing GUIDs
+// - writing to [ETW]
+// - opening and managing VHDs
+// - parsing [Windows Image files]
+// - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/ea.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/ea.go
index 4051c1b33bfe..e104dbdfdf96 100644
--- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/ea.go
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/ea.go
@@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
 	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
 	if err != nil {
 		err = errInvalidEaBuffer
-		return
+		return ea, nb, err
 	}
 
 	nameOffset := fileFullEaInformationSize
@@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
 	nextOffset := int(info.NextEntryOffset)
 	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
 		err = errInvalidEaBuffer
-		return
+		return ea, nb, err
 	}
 
 	ea.Name = string(b[nameOffset : nameOffset+nameLen])
@@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
 	if info.NextEntryOffset != 0 {
 		nb = b[info.NextEntryOffset:]
 	}
-	return
+	return ea, nb, err
 }
 
 // DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
@@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
 		eas = append(eas, ea)
 		b = nb
 	}
-	return
+	return eas, err
 }
 
 func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/file.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/file.go
index 0385e4108129..175a99d3f429 100644
--- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/file.go
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/file.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package winio
@@ -10,6 +11,8 @@ import (
 	"sync/atomic"
 	"syscall"
 	"time"
+
+	"golang.org/x/sys/windows"
 )
 
 //sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
@@ -23,6 +26,8 @@ type atomicBool int32
 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
 func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
+
+//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
 func (b *atomicBool) swap(new bool) bool {
 	var newInt int32
 	if new {
@@ -31,11 +36,6 @@
 	return atomic.SwapInt32((*int32)(b), newInt) == 1
 }
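For orientation, the `ea.go` helpers touched above round-trip like this. `EncodeExtendedAttributes` is assumed to exist unchanged in the same package; only `DecodeExtendedAttributes` appears in this diff:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Encode a single extended attribute into the FILE_FULL_EA_INFORMATION
	// wire format, then decode it back.
	eas := []winio.ExtendedAttribute{{Name: "user.comment", Value: []byte("hello")}}
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		panic(err)
	}
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded[0].Name, string(decoded[0].Value)) // user.comment hello
}
```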
-const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - var ( ErrFileClosed = errors.New("file has already been closed") ErrTimeout = &timeoutError{} @@ -43,28 +43,28 @@ var ( type timeoutError struct{} -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once var ioCompletionPort syscall.Handle -// ioResult contains the result of an asynchronous IO operation +// ioResult contains the result of an asynchronous IO operation. type ioResult struct { bytes uint32 err error } -// ioOperation represents an outstanding asynchronous Win32 IO +// ioOperation represents an outstanding asynchronous Win32 IO. type ioOperation struct { o syscall.Overlapped ch chan ioResult } -func initIo() { +func initIO() { h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) @@ -93,15 +93,15 @@ type deadlineHandler struct { timedout atomicBool } -// makeWin32File makes a new win32File from an existing file handle +// makeWin32File makes a new win32File from an existing file handle. func makeWin32File(h syscall.Handle) (*win32File, error) { f := &win32File{handle: h} - ioInitOnce.Do(initIo) + ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) if err != nil { return nil, err } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) if err != nil { return nil, err } @@ -120,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { return f, nil } -// closeHandle closes the resources associated with a Win32 handle +// closeHandle closes the resources associated with a Win32 handle. func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. if !f.closing.swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) + _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start syscall.Close(f.handle) @@ -143,9 +143,14 @@ func (f *win32File) Close() error { return nil } -// prepareIo prepares for a new IO operation. +// IsClosed checks if the file has been closed. +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + +// prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIo() (*ioOperation, error) { +func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() if f.closing.isSet() { f.wgLock.RUnlock() @@ -158,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) { return c, nil } -// ioCompletionProcessor processes completed async IOs forever +// ioCompletionProcessor processes completed async IOs forever. 
func ioCompletionProcessor(h syscall.Handle) { for { var bytes uint32 @@ -172,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) { } } -// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) } var timeout timeoutChan @@ -194,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno if f.closing.isSet() { err = ErrFileClosed } @@ -204,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -215,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er // runtime.KeepAlive is needed, as c is passed via native // code to ioCompletionProcessor, c must remain alive // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? runtime.KeepAlive(c) return int(r.bytes), err } // Read reads from a file handle. func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -233,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) { var bytes uint32 err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { + } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF } else { return n, err @@ -248,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) { // Write writes to a file handle. 
func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -260,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) { var bytes uint32 err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/fileinfo.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/fileinfo.go index 3ab6bff69c5d..702950e72a49 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -14,13 +15,18 @@ import ( type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 - pad uint32 // padding + _ uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -48,7 +59,10 @@ type FileStandardInfo struct { // GetFileStandardInfo retrieves ended information for the file. func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), + windows.FileStandardInfo, + (*byte)(unsafe.Pointer(si)), + uint32(unsafe.Sizeof(*si))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -65,7 +79,12 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file. 
func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileIdInfo, + (*byte)(unsafe.Pointer(fileID)), + uint32(unsafe.Sizeof(*fileID)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/hvsock.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/hvsock.go index b632f8f8bb98..52f1c280f6a7 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,8 +1,11 @@ +//go:build windows // +build windows package winio import ( + "context" + "errors" "fmt" "io" "net" @@ -11,16 +14,87 @@ import ( "time" "unsafe" + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/socket" "github.com/Microsoft/go-winio/pkg/guid" ) -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind +const afHVSock = 34 // AF_HYPERV -const ( - afHvSock = 34 // AF_HYPERV +// Well known Service and VM IDs +//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - socketError = ^uintptr(0) -) +// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. +func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 + return guid.GUID{} +} + +// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. +func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff + return guid.GUID{ + Data1: 0xffffffff, + Data2: 0xffff, + Data3: 0xffff, + Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + } +} + +// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. +func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 + return guid.GUID{ + Data1: 0xe0e16197, + Data2: 0xdd56, + Data3: 0x4a10, + Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, + } +} + +// HvsockGUIDSiloHost is the address of a silo's host partition: +// - The silo host of a hosted silo is the utility VM. +// - The silo host of a silo on a physical host is the physical host. +func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 + return guid.GUID{ + Data1: 0x36bd0c5c, + Data2: 0x7276, + Data3: 0x4223, + Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, + } +} + +// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. +func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd + return guid.GUID{ + Data1: 0x90db8b89, + Data2: 0xd35, + Data3: 0x4f79, + Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, + } +} + +// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. +// Listening on this VmId accepts connection from: +// - Inside silos: silo host partition. +// - Inside hosted silo: host of the VM. +// - Inside VM: VM host. +// - Physical host: Not supported. 
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 + return guid.GUID{ + Data1: 0xa42e7cda, + Data2: 0xd03f, + Data3: 0x480c, + Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, + } +} + +// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. +func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 + return guid.GUID{ + Data2: 0xfacb, + Data3: 0x11e6, + Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, + } +} // An HvsockAddr is an address for a AF_HYPERV socket. type HvsockAddr struct { @@ -35,8 +109,10 @@ type rawHvsockAddr struct { ServiceID guid.GUID } +var _ socket.RawSockaddr = &rawHvsockAddr{} + // Network returns the address's network name, "hvsock". -func (addr *HvsockAddr) Network() string { +func (*HvsockAddr) Network() string { return "hvsock" } @@ -46,14 +122,14 @@ func (addr *HvsockAddr) String() string { // VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g := hvsockVsockServiceTemplate() // make a copy g.Data1 = port return g } func (addr *HvsockAddr) raw() rawHvsockAddr { return rawHvsockAddr{ - Family: afHvSock, + Family: afHVSock, VMID: addr.VMID, ServiceID: addr.ServiceID, } @@ -64,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { addr.ServiceID = raw.ServiceID } +// Sockaddr returns a pointer to and the size of this struct. +// +// Implements the [socket.RawSockaddr] interface, and allows use in +// [socket.Bind] and [socket.ConnectEx]. +func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { + return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil +} + +// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. +func (r *rawHvsockAddr) FromBytes(b []byte) error { + n := int(unsafe.Sizeof(rawHvsockAddr{})) + + if len(b) < n { + return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) + } + + copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) + if r.Family != afHVSock { + return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) + } + + return nil +} + // HvsockListener is a socket listener for the AF_HYPERV address family. type HvsockListener struct { sock *win32File addr HvsockAddr } +var _ net.Listener = &HvsockListener{} + // HvsockConn is a connected socket of the AF_HYPERV address family. type HvsockConn struct { sock *win32File local, remote HvsockAddr } -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) +var _ net.Conn = &HvsockConn{} + +func newHVSocket() (*win32File, error) { + fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } @@ -93,12 +197,12 @@ func newHvSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address. 
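Before the `ListenHvsock` implementation below, a server-side sketch combining the well-known VmId helpers and `VsockServiceID` from this change; the port number is illustrative:

```go
package main

import (
	"log"
	"net"

	"github.com/Microsoft/go-winio"
)

func handle(conn net.Conn) {
	defer conn.Close()
	// application protocol goes here
}

func main() {
	addr := &winio.HvsockAddr{
		VMID:      winio.HvsockGUIDWildcard(), // accept from all partitions
		ServiceID: winio.VsockServiceID(5000), // AF_VSOCK-style "port" 5000
	}
	l, err := winio.ListenHvsock(addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go handle(conn)
	}
}
```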
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("listen", err) } sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } @@ -120,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr { // Accept waits for the next connection and returns it. func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("accept", err) } @@ -129,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { sock.Close() } }() - c, err := l.sock.prepareIo() + c, err := l.sock.prepareIO() if err != nil { return nil, l.opErr("accept", err) } defer l.sock.wg.Done() // AcceptEx, per documentation, requires an extra 16 bytes per address. + // + // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } + conn := &HvsockConn{ sock: sock, } + // The local address returned in the AcceptEx buffer is the same as the Listener socket's + // address. However, the service GUID reported by GetSockName is different from the Listeners + // socket, and is sometimes the same as the local address of the socket that dialed the + // address, with the service GUID.Data1 incremented, but othertimes is different. + // todo: does the local address matter? is the listener's address or the actual address appropriate? conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + + // initialize the accepted socket and update its properties with those of the listening socket + if err = windows.Setsockopt(windows.Handle(sock.handle), + windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, + (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { + return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) + } + sock = nil return conn, nil } @@ -159,43 +278,171 @@ func (l *HvsockListener) Close() error { return l.sock.Close() } -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() +// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). +type HvsockDialer struct { + // Deadline is the time the Dial operation must connect before erroring. + Deadline time.Time + + // Retries is the number of additional connects to try if the connection times out, is refused, + // or the host is unreachable + Retries uint + + // RetryWait is the time to wait after a connection error to retry + RetryWait time.Duration + + rt *time.Timer // redial wait timer +} + +// Dial the Hyper-V socket at addr. +// +// See [HvsockDialer.Dial] for more information. 
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + return (&HvsockDialer{}).Dial(ctx, addr) +} + +// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. +// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between +// retries. +// +// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. +func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() if err != nil { - return nil, err + return nil, conn.opErr(op, err) } defer func() { if sock != nil { sock.Close() } }() - c, err := sock.prepareIo() + + sa := addr.raw() + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) } defer sock.wg.Done() var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + windows.Handle(sock.handle), + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) } - conn := &HvsockConn{ - sock: sock, - remote: *addr, + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + windows.Handle(sock.handle), + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(windows.Handle(sock.handle), &sal) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) } + + conn.sock = sock sock = nil + return conn, nil } -*/ + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. 
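The client side of the same sketch, exercising the retry knobs on the new `HvsockDialer` (the deadline, retry count, and port are illustrative values):

```go
package example

import (
	"context"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func dialVM(ctx context.Context, vmID guid.GUID) (*winio.HvsockConn, error) {
	d := &winio.HvsockDialer{
		Deadline:  time.Now().Add(10 * time.Second),
		Retries:   3,           // retry refused/unreachable/timed-out connects
		RetryWait: time.Second, // wait between attempts
	}
	return d.Dial(ctx, &winio.HvsockAddr{
		VMID:      vmID,
		ServiceID: winio.VsockServiceID(5000),
	})
}
```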
+func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} } func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("read", err) } @@ -203,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) } return 0, conn.opErr("read", err) } else if n == 0 { @@ -229,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) { } func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("write", err) } @@ -237,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) } return 0, conn.opErr("write", err) } @@ -252,29 +501,43 @@ func (conn *HvsockConn) Close() error { return conn.sock.Close() } +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +// shutdown disables sending or receiving on a socket. func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if conn.IsClosed() { + return socket.ErrSocketClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } return os.NewSyscallError("shutdown", err) } return nil } -// CloseRead shuts down the read end of the socket. +// CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closeread", err) } return nil } -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. 
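`CloseWrite` (below) enables the usual request/response half-close pattern; a sketch:

```go
package example

import (
	"io"

	"github.com/Microsoft/go-winio"
)

// roundTrip writes one request, half-closes so the peer's Read sees
// io.EOF once the data is drained, then reads the complete reply.
func roundTrip(conn *winio.HvsockConn, req []byte) ([]byte, error) {
	if _, err := conn.Write(req); err != nil {
		return nil, err
	}
	if err := conn.CloseWrite(); err != nil {
		return nil, err
	}
	return io.ReadAll(conn)
}
```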
func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closewrite", err) } return nil } @@ -291,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr { // SetDeadline implements the net.Conn SetDeadline method. func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) + // todo: implement `SetDeadline` for `win32File` + if err := conn.SetReadDeadline(t); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := conn.SetWriteDeadline(t); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } return nil } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go new file mode 100644 index 000000000000..7e82f9afa952 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go @@ -0,0 +1,20 @@ +package socket + +import ( + "unsafe" +) + +// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The +// struct must meet the Win32 sockaddr requirements specified here: +// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 +// +// Specifically, the struct size must be least larger than an int16 (unsigned short) +// for the address family. +type RawSockaddr interface { + // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing + // for the RawSockaddr's data to be overwritten by syscalls (if necessary). + // + // It is the callers responsibility to validate that the values are valid; invalid + // pointers or size can cause a panic. + Sockaddr() (unsafe.Pointer, int32, error) +} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go new file mode 100644 index 000000000000..39e8c05f8f3f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -0,0 +1,179 @@ +//go:build windows + +package socket + +import ( + "errors" + "fmt" + "net" + "sync" + "syscall" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go + +//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname +//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername +//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const socketError = uintptr(^uint32(0)) + +var ( + // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? + + ErrBufferSize = errors.New("buffer size") + ErrAddrFamily = errors.New("address family") + ErrInvalidPointer = errors.New("invalid pointer") + ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) +) + +// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) + +// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. +// If rsa is not large enough, the [windows.WSAEFAULT] is returned. 
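To make the `RawSockaddr` contract above concrete outside of hvsock, here is a hypothetical AF_INET implementation; it is not part of this diff, and `rawHvsockAddr` above is the real in-tree example:

```go
//go:build windows

package example

import "unsafe"

// rawSockaddrInet4 mirrors the Win32 sockaddr_in layout; per the contract,
// the unsigned-short address family leads the struct.
type rawSockaddrInet4 struct {
	Family uint16
	Port   uint16 // network byte order
	Addr   [4]byte
	Zero   [8]byte
}

// Sockaddr hands syscalls a pointer to the struct and its size so they can
// read it, or overwrite it in place.
func (r *rawSockaddrInet4) Sockaddr() (unsafe.Pointer, int32, error) {
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}
```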
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
new file mode 100644
index 000000000000..39e8c05f8f3f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
@@ -0,0 +1,179 @@
+//go:build windows
+
+package socket
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"syscall"
+	"unsafe"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+	// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
+
+	ErrBufferSize     = errors.New("buffer size")
+	ErrAddrFamily     = errors.New("address family")
+	ErrInvalidPointer = errors.New("invalid pointer")
+	ErrSocketClosed   = fmt.Errorf("socket closed: %w", net.ErrClosed)
+)
+
+// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
+
+// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
+// If rsa is not large enough, [windows.WSAEFAULT] is returned.
+func GetSockName(s windows.Handle, rsa RawSockaddr) error {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
+	// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
+	return getsockname(s, ptr, &l)
+}
+
+// GetPeerName returns the remote address the socket is connected to.
+//
+// See [GetSockName] for more information.
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	return getpeername(s, ptr, &l)
+}
+
+func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	return bind(s, ptr, l)
+}
+
+// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of
+// their sockaddr interface, so they cannot be used with HvsockAddr.
+// Replicate functionality here from
+// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
+
+// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
+// runtime via a WSAIoctl call:
+// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
+
+type runtimeFunc struct {
+	id   guid.GUID
+	once sync.Once
+	addr uintptr
+	err  error
+}
+
+func (f *runtimeFunc) Load() error {
+	f.once.Do(func() {
+		var s windows.Handle
+		s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
+		if f.err != nil {
+			return
+		}
+		defer windows.CloseHandle(s) //nolint:errcheck
+
+		var n uint32
+		f.err = windows.WSAIoctl(s,
+			windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&f.id)),
+			uint32(unsafe.Sizeof(f.id)),
+			(*byte)(unsafe.Pointer(&f.addr)),
+			uint32(unsafe.Sizeof(f.addr)),
+			&n,
+			nil, //overlapped
+			0,   //completionRoutine
+		)
+	})
+	return f.err
+}
+
+var (
+	// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
+	WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
+		Data1: 0x25a207b9,
+		Data2: 0xddf3,
+		Data3: 0x4660,
+		Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+	}
+
+	connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
+)
+
+func ConnectEx(
+	fd windows.Handle,
+	rsa RawSockaddr,
+	sendBuf *byte,
+	sendDataLen uint32,
+	bytesSent *uint32,
+	overlapped *windows.Overlapped,
+) error {
+	if err := connectExFunc.Load(); err != nil {
+		return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
+	}
+	ptr, n, err := rsa.Sockaddr()
+	if err != nil {
+		return err
+	}
+	return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
+
+// BOOL LpfnConnectex(
+//   [in]           SOCKET s,
+//   [in]           const sockaddr *name,
+//   [in]           int namelen,
+//   [in, optional] PVOID lpSendBuffer,
+//   [in]           DWORD dwSendDataLength,
+//   [out]          LPDWORD lpdwBytesSent,
+//   [in]           LPOVERLAPPED lpOverlapped
+// )
+
+func connectEx(
+	s windows.Handle,
+	name unsafe.Pointer,
+	namelen int32,
+	sendBuf *byte,
+	sendDataLen uint32,
+	bytesSent *uint32,
+	overlapped *windows.Overlapped,
+) (err error) {
+	// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
+	r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
+		7,
+		uintptr(s),
+		uintptr(name),
+		uintptr(namelen),
+
uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + 0, + 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 000000000000..6d2e1a9e4438 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 +1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pipe.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pipe.go index 96700a73de25..ca6e38fc0006 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pipe.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -13,6 +14,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/windows" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe @@ -21,10 +24,10 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags 
uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl type ioStatusBlock struct { Status, Information uintptr @@ -51,45 +54,22 @@ type securityDescriptor struct { Control uint16 Owner uintptr Group uintptr - Sacl uintptr - Dacl uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl } -type ntstatus int32 +type ntStatus int32 -func (status ntstatus) Err() error { +func (status ntStatus) Err() error { if status >= 0 { return nil } return rtlNtStatusToDosError(status) } -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - var ( // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") + ErrPipeListenerClosed = net.ErrClosed errPipeWriteClosed = errors.New("pipe has been closed for write") ) @@ -116,9 +96,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr { } func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) } // CloseWrite closes the write side of a message pipe in byte mode. @@ -157,14 +138,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return 0, io.EOF } n, err := f.win32File.Read(b) - if err == io.EOF { + if err == io.EOF { //nolint:errorlint // If this was the result of a zero-byte read, then // it is possible that the read was due to a zero-size // message. Since we are simulating CloseWrite with a // zero-byte message, ensure that all future Read() calls // also return EOF. 
f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { + } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -173,7 +154,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return n, err } -func (s pipeAddress) Network() string { +func (pipeAddress) Network() string { return "pipe" } @@ -184,16 +165,21 @@ func (s pipeAddress) String() string { // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { for { - select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + h, err := createFile(*path, + access, + 0, + nil, + syscall.OPEN_EXISTING, + windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, + 0) if err == nil { return h, nil } - if err != cERROR_PIPE_BUSY { + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno return h, &os.PathError{Err: err, Op: "open", Path: *path} } // Wait 10 msec and try again. This is a rather simplistic @@ -213,9 +199,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { } else { absTimeout = time.Now().Add(2 * time.Second) } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { + if errors.Is(err, context.DeadlineExceeded) { return nil, ErrTimeout } return conn, err @@ -251,7 +238,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, // If the pipe is in message mode, return a message byte pipe, which // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { + if flags&windows.PIPE_TYPE_MESSAGE != 0 { return &win32MessageBytePipe{ win32Pipe: win32Pipe{win32File: f, path: path}, }, nil @@ -283,7 +270,11 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy oa.Length = unsafe.Sizeof(oa) var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } defer localFree(ntPath.Buffer) @@ -292,8 +283,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // The security descriptor is only needed for the first pipe. if first { if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) + l := uint32(len(sd)) + sdb := localAlloc(0, l) defer localFree(sdb) copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) @@ -301,28 +292,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // Construct the default named pipe security descriptor. 
var dacl uintptr if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } defer localFree(dacl) sdb := &securityDescriptor{ Revision: 1, - Control: cSE_DACL_PRESENT, + Control: windows.SE_DACL_PRESENT, Dacl: dacl, } oa.SecurityDescriptor = sdb } } - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE + typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(cFILE_OPEN) + disposition := uint32(windows.FILE_OPEN) access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) if first { - disposition = cFILE_CREATE + disposition = windows.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. @@ -335,7 +326,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy h syscall.Handle iosb ioStatusBlock ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -380,7 +384,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { p.Close() p = nil err = <-ch - if err == nil || err == ErrFileClosed { + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno err = ErrPipeListenerClosed } } @@ -402,12 +406,12 @@ func (l *win32PipeListener) listenerRoutine() { p, err = l.makeConnectedServerPipe() // If the connection was immediately closed by the client, try // again. - if err != cERROR_NO_DATA { + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno break } } responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } syscall.Close(l.firstHandle) @@ -469,15 +473,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { } func connectPipe(p *win32File) error { - c, err := p.prepareIo() + c, err := p.prepareIO() if err != nil { return err } defer p.wg.Done() err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno return err } return nil diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index f497c0e39178..48ce4e924366 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,5 +1,3 @@ -// +build windows - // Package guid provides a GUID type. The backing structure for a GUID is // identical to that used by the golang.org/x/sys/windows GUID type. 
 // There are two main binary encodings used for a GUID, the big-endian encoding,
@@ -9,26 +7,26 @@ package guid
 
 import (
 	"crypto/rand"
-	"crypto/sha1"
+	"crypto/sha1" //nolint:gosec // not used for secure application
 	"encoding"
 	"encoding/binary"
 	"fmt"
 	"strconv"
-
-	"golang.org/x/sys/windows"
 )
 
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
+
 // Variant specifies which GUID variant (or "type") of the GUID. It determines
 // how the entirety of the rest of the GUID is interpreted.
 type Variant uint8
 
-// The variants specified by RFC 4122.
+// The variants specified by RFC 4122 section 4.1.1.
 const (
 	// VariantUnknown specifies a GUID variant which does not conform to one of
 	// the variant encodings specified in RFC 4122.
 	VariantUnknown Variant = iota
 	VariantNCS
-	VariantRFC4122
+	VariantRFC4122 // RFC 4122
 	VariantMicrosoft
 	VariantFuture
 )
@@ -38,16 +36,13 @@ const (
 // hash of an input string.
 type Version uint8
 
+func (v Version) String() string {
+	return strconv.FormatUint(uint64(v), 10)
+}
+
 var _ = (encoding.TextMarshaler)(GUID{})
 var _ = (encoding.TextUnmarshaler)(&GUID{})
 
-// GUID represents a GUID/UUID. It has the same structure as
-// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
-// that type. It is defined as its own type so that stringification and
-// marshaling can be supported. The representation matches that used by native
-// Windows code.
-type GUID windows.GUID
-
 // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
 func NewV4() (GUID, error) {
 	var b [16]byte
@@ -70,7 +65,7 @@ func NewV4() (GUID, error) {
 // big-endian UTF16 stream of bytes. If that is desired, the string can be
 // encoded as such before being passed to this function.
 func NewV5(namespace GUID, name []byte) (GUID, error) {
-	b := sha1.New()
+	b := sha1.New() //nolint:gosec // not used for secure application
 	namespaceBytes := namespace.ToArray()
 	b.Write(namespaceBytes[:])
 	b.Write(name)
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 000000000000..805bd3548424
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type as that is only available to builds
+// targeted at `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16
+	Data4 [8]byte
+}
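A short usage sketch for the guid package as it stands after this change; it is illustrative only and not part of the patch. The DNS namespace literal and the "example.com" name are arbitrary inputs chosen for the example, and the import uses the vendored module path as it appears in this repository.

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Version 4: purely (pseudo)random bytes from crypto/rand.
	g4, err := guid.NewV4()
	if err != nil {
		panic(err)
	}

	// Version 5: a deterministic SHA-1 hash of a namespace GUID plus a name,
	// so the same inputs always produce the same GUID. This literal is the
	// RFC 4122 DNS namespace, spelled out field by field.
	dns := guid.GUID{
		Data1: 0x6ba7b810,
		Data2: 0x9dad,
		Data3: 0x11d1,
		Data4: [8]byte{0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8},
	}
	g5, err := guid.NewV5(dns, []byte("example.com"))
	if err != nil {
		panic(err)
	}

	// GUID implements encoding.TextMarshaler (asserted in guid.go above),
	// which is what backs its string form.
	t4, _ := g4.MarshalText()
	t5, _ := g5.MarshalText()
	fmt.Printf("v4=%s v5=%s\n", t4, t5)
}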
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
new file mode 100644
index 000000000000..27e45ee5ccf9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
@@ -0,0 +1,13 @@
+//go:build windows
+// +build windows
+
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
new file mode 100644
index 000000000000..4076d3132fdd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
+
+package guid
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[VariantUnknown-0]
+	_ = x[VariantNCS-1]
+	_ = x[VariantRFC4122-2]
+	_ = x[VariantMicrosoft-3]
+	_ = x[VariantFuture-4]
+}
+
+const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
+
+var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
+
+func (i Variant) String() string {
+	if i >= Variant(len(_Variant_index)-1) {
+		return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
+}
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
index fca241590cca..6df87b749905 100644
--- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
+++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
@@ -1,13 +1,13 @@
+//go:build windows
 // +build windows
 
 package security
 
 import (
+	"fmt"
 	"os"
 	"syscall"
 	"unsafe"
-
-	"github.com/pkg/errors"
 )
 
 type (
@@ -21,6 +21,7 @@ type (
 	trusteeForm uint32
 	trusteeType uint32
 
+	//nolint:structcheck // structcheck thinks fields are unused, but they are used to pass data to OS
 	explicitAccess struct {
 		accessPermissions accessMask
 		accessMode        accessMode
@@ -28,6 +29,7 @@ type (
 		trustee           trustee
 	}
 
+	//nolint:structcheck,unused // structcheck thinks fields are unused, but they are used to pass data to OS
 	trustee struct {
 		multipleTrustee          *trustee
 		multipleTrusteeOperation int32
@@ -45,6 +47,7 @@ const (
 	desiredAccessReadControl desiredAccess = 0x20000
 	desiredAccessWriteDac    desiredAccess = 0x40000
 
+	//cspell:disable-next-line
 	gvmga = "GrantVmGroupAccess:"
 
 	inheritModeNoInheritance inheritMode = 0x0
@@ -57,9 +60,9 @@ const (
 	shareModeRead  shareMode = 0x1
 	shareModeWrite shareMode = 0x2
 
-	sidVmGroup = "S-1-5-83-0"
+	sidVMGroup = "S-1-5-83-0"
 
-	trusteeFormIsSid trusteeForm = 0
+	trusteeFormIsSID trusteeForm = 0
 
 	trusteeTypeWellKnownGroup trusteeType = 5
 )
 
@@ -68,11 +71,13 @@ const (
 // include Grant ACE entries for the VM Group SID. This is a golang re-
 // implementation of the same function in vmcompute, just not exported in
 // RS5. Which kind of sucks. Sucks a lot :/
+//
+//revive:disable-next-line:var-naming VM, not Vm
 func GrantVmGroupAccess(name string) error {
 	// Stat (to determine if `name` is a directory).
 	s, err := os.Stat(name)
 	if err != nil {
-		return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
+		return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
 	}
 
 	// Get a handle to the file/directory. Must defer Close on success.
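This file, like most of the go-winio changes in this patch, swaps github.com/pkg/errors wrapping for the standard library's %w verb. A minimal standalone sketch of what %w preserves; it is illustrative only and not part of the patch, and statWrapped is a made-up helper mirroring the pattern above:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// statWrapped annotates the error with context while keeping the original
// error in the chain via %w, exactly as the patched GrantVmGroupAccess does.
func statWrapped(name string) error {
	if _, err := os.Stat(name); err != nil {
		return fmt.Errorf("GrantVmGroupAccess: os.Stat %s: %w", name, err)
	}
	return nil
}

func main() {
	err := statWrapped("no-such-file")
	// Because of %w, callers can still detect the underlying cause.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // prints: true
}

Formatting with %s or %v instead would flatten the chain and break errors.Is and errors.As matching; using the stdlib verb also lets the patch drop the external pkg/errors dependency.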
@@ -80,7 +85,7 @@ func GrantVmGroupAccess(name string) error {
 	if err != nil {
 		return err // Already wrapped
 	}
-	defer syscall.CloseHandle(fd)
+	defer syscall.CloseHandle(fd) //nolint:errcheck
 
 	// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
 	ot := objectTypeFileObject
@@ -88,9 +93,9 @@ func GrantVmGroupAccess(name string) error {
 	sd := uintptr(0)
 	origDACL := uintptr(0)
 	if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
-		return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
+		return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
 	}
-	defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
+	defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) //nolint:errcheck
 
 	// Generate a new DACL which is the current DACL with the required ACEs added.
 	// Must defer LocalFree on success.
@@ -98,11 +103,11 @@ func GrantVmGroupAccess(name string) error {
 	if err != nil {
 		return err // Already wrapped
 	}
-	defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
+	defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) //nolint:errcheck
 
 	// And finally use SetSecurityInfo to apply the updated DACL.
 	if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
-		return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
+		return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
 	}
 
 	return nil
@@ -111,16 +116,19 @@ func GrantVmGroupAccess(name string) error {
 // createFile is a helper function to call [Nt]CreateFile to get a handle to
 // the file or directory.
 func createFile(name string, isDir bool) (syscall.Handle, error) {
-	namep := syscall.StringToUTF16(name)
+	namep, err := syscall.UTF16FromString(name)
+	if err != nil {
+		return syscall.InvalidHandle, fmt.Errorf("could not convert name to UTF-16: %w", err)
+	}
 	da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
 	sm := uint32(shareModeRead | shareModeWrite)
 	fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
 	if isDir {
-		fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
+		fa |= syscall.FILE_FLAG_BACKUP_SEMANTICS
 	}
 	fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
 	if err != nil {
-		return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
+		return syscall.InvalidHandle, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
 	}
 	return fd, nil
 }
@@ -129,9 +137,9 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
 // The caller is responsible for LocalFree of the returned DACL on success.
func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { // Generate pointers to the SIDs based on the string SIDs - sid, err := syscall.StringToSid(sidVmGroup) + sid, err := syscall.StringToSid(sidVMGroup) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVMGroup, err) } inheritance := inheritModeNoInheritance @@ -140,12 +148,12 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp } eaArray := []explicitAccess{ - explicitAccess{ + { accessPermissions: accessMaskDesiredPermission, accessMode: accessModeGrant, inheritance: inheritance, trustee: trustee{ - trusteeForm: trusteeFormIsSid, + trusteeForm: trusteeFormIsSID, trusteeType: trusteeTypeWellKnownGroup, name: uintptr(unsafe.Pointer(sid)), }, @@ -154,7 +162,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp modifiedDACL := uintptr(0) if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) } return modifiedDACL, nil diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go index c40c2739b7c6..71326e4e46fe 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -1,7 +1,7 @@ package security -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go -//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo -//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo -//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go index 4a90cb3cc814..26c986b88fea 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go +++ 
b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package security @@ -45,26 +47,26 @@ var ( procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") ) -func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) - if r1 != 0 { - err = errnoErr(e1) +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) - if r1 != 0 { - err = errnoErr(e1) +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) - if r1 != 0 { - err = errnoErr(e1) +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/privilege.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/privilege.go index 9c83d36fe533..0ff9dac906d3 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/privilege.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -24,19 +25,15 @@ import ( //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = 
advapi32.LookupPrivilegeDisplayNameW const ( - SE_PRIVILEGE_ENABLED = 2 + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" -) - -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" ) var ( @@ -50,11 +47,9 @@ type PrivilegeError struct { } func (e *PrivilegeError) Error() string { - s := "" + s := "Could not enable privilege " if len(e.privileges) > 1 { s = "Could not enable privileges " - } else { - s = "Could not enable privilege " } for i, p := range e.privileges { if i != 0 { @@ -93,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error { } func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 + privileges := make([]uint64, 0, len(names)) privNameMutex.Lock() defer privNameMutex.Unlock() for _, name := range names { @@ -126,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { return err } - p, _ := windows.GetCurrentProcess() + p := windows.CurrentProcess() var token windows.Token err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) if err != nil { @@ -139,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) + _ = binary.Write(&b, binary.LittleEndian, p) + _ = binary.Write(&b, binary.LittleEndian, action) } prevState := make([]byte, b.Len()) reqSize := uint32(0) @@ -150,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e if !success { return err } - if err == ERROR_NOT_ALL_ASSIGNED { + if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno return &PrivilegeError{privileges} } return nil @@ -176,7 +171,7 @@ func getPrivilegeName(luid uint64) string { } func newThreadToken() (windows.Token, error) { - err := impersonateSelf(securityImpersonation) + err := impersonateSelf(windows.SecurityImpersonation) if err != nil { return 0, err } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/reparse.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/reparse.go index fc1ee4d3a3e9..67d1a104a63f 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/reparse.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/reparse.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package winio import ( @@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte { } var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) + _ = binary.Write(&b, binary.LittleEndian, &data) if !rp.IsMountPoint { flags := uint32(0) if relative { flags |= 1 } - binary.Write(&b, binary.LittleEndian, flags) + _ = binary.Write(&b, binary.LittleEndian, flags) } - binary.Write(&b, 
binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) return b.Bytes() } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/sd.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/sd.go index db1b370a1b57..5550ef6b61ef 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/sd.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/sd.go @@ -1,23 +1,25 @@ +//go:build windows // +build windows package winio import ( + "errors" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) //sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW //sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW //sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW //sys localFree(mem uintptr) = LocalFree //sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - type AccountLookupError struct { Name string Err error @@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string { return "lookup account: empty account name specified" } var s string - switch e.Err { - case cERROR_NONE_MAPPED: + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): s = "not found" default: s = e.Err.Error() @@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string { return "lookup account " + e.Name + ": " + s } +func (e *AccountLookupError) Unwrap() error { return e.Err } + type SddlConversionError struct { Sddl string Err error @@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string { return "convert " + e.Sddl + ": " + e.Err.Error() } +func (e *SddlConversionError) Unwrap() error { return e.Err } + // LookupSidByName looks up the SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid func LookupSidByName(name string) (sid string, err error) { if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} } var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) { return sid, nil } +// LookupNameBySid looks up the name 
of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer localFree(uintptr(unsafe.Pointer(sidPtr))) + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + func SddlToSecurityDescriptor(sddl string) ([]byte, error) { var sdBuffer uintptr err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) @@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) { func SecurityDescriptorToSddl(sd []byte) (string, error) { var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. + // The returned string length seems to include an arbitrary number of terminating NULs. // Don't use it. err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/syscall.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/syscall.go index 5955c99fdeaf..a6ca111b39c4 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,5 @@ +//go:build windows + package winio -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/tools.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/tools.go new file mode 100644 index 000000000000..2aa045843ea8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package winio + +import _ "golang.org/x/tools/cmd/stringer" diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/vhd.go index b03b789e6579..b54cad112703 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package vhd @@ -7,17 +8,16 @@ import ( "syscall" "github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) -//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go +//go:generate go run 
github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go -//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk -//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk -//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath type ( CreateVirtualDiskFlag uint32 @@ -62,20 +62,35 @@ type OpenVirtualDiskParameters struct { Version2 OpenVersion2 } +// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + type AttachVersion2 struct { RestrictedOffset uint64 RestrictedLength uint64 } type AttachVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 + Version uint32 Version2 AttachVersion2 } const ( + //revive:disable-next-line:var-naming ALL_CAPS VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 - // Access Mask for opening a VHD + // Access Mask for opening a VHD. 
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 @@ -87,7 +102,7 @@ const ( VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 - // Flags for creating a VHD + // Flags for creating a VHD. CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 @@ -95,12 +110,12 @@ const ( CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 - CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 - CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 - // Flags for opening a VHD + // Flags for opening a VHD. OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 @@ -113,7 +128,7 @@ const ( OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 - // Flags for attaching a VHD + // Flags for attaching a VHD. AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 @@ -126,12 +141,14 @@ const ( AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 - // Flags for detaching a VHD + // Flags for detaching a VHD. DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 ) // CreateVhdx is a helper function to create a simple vhdx file at the given path using // default values. +// +//revive:disable-next-line:var-naming VHDX, not Vhdx func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { params := CreateVirtualDiskParameters{ Version: 2, @@ -146,21 +163,20 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { return err } - if err := syscall.CloseHandle(handle); err != nil { - return err - } - return nil + return syscall.CloseHandle(handle) } // DetachVirtualDisk detaches a virtual hard disk by handle. func DetachVirtualDisk(handle syscall.Handle) (err error) { if err := detachVirtualDisk(handle, 0, 0); err != nil { - return errors.Wrap(err, "failed to detach virtual disk") + return fmt.Errorf("failed to detach virtual disk: %w", err) } return nil } // DetachVhd detaches a vhd found at `path`. 
+// +//revive:disable-next-line:var-naming VHD, not Vhd func DetachVhd(path string) error { handle, err := OpenVirtualDisk( path, @@ -170,12 +186,16 @@ func DetachVhd(path string) error { if err != nil { return err } - defer syscall.CloseHandle(handle) + defer syscall.CloseHandle(handle) //nolint:errcheck return DetachVirtualDisk(handle) } // AttachVirtualDisk attaches a virtual hard disk for use. -func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { +func AttachVirtualDisk( + handle syscall.Handle, + attachVirtualDiskFlag AttachVirtualDiskFlag, + parameters *AttachVirtualDiskParameters, +) (err error) { // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. if err := attachVirtualDisk( handle, @@ -185,13 +205,15 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua parameters, nil, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } // AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 // of the ATTACH_VIRTUAL_DISK_PARAMETERS. +// +//revive:disable-next-line:var-naming VHD, not Vhd func AttachVhd(path string) (err error) { handle, err := OpenVirtualDisk( path, @@ -202,20 +224,24 @@ func AttachVhd(path string) (err error) { return err } - defer syscall.CloseHandle(handle) + defer syscall.CloseHandle(handle) //nolint:errcheck params := AttachVirtualDiskParameters{Version: 2} if err := AttachVirtualDisk( handle, AttachVirtualDiskFlagNone, ¶ms, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } // OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. -func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { +func OpenVirtualDisk( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, +) (syscall.Handle, error) { parameters := OpenVirtualDiskParameters{Version: 2} handle, err := OpenVirtualDiskWithParameters( vhdPath, @@ -230,29 +256,55 @@ func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask } // OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. 
-func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { +func OpenVirtualDiskWithParameters( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, + parameters *OpenVirtualDiskParameters, +) (syscall.Handle, error) { var ( handle syscall.Handle defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 ) if parameters.Version != 2 { return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } if err := openVirtualDisk( &defaultType, vhdPath, uint32(virtualDiskAccessMask), uint32(openVirtualDiskFlags), - parameters, + params, &handle, ); err != nil { - return 0, errors.Wrap(err, "failed to open virtual disk") + return 0, fmt.Errorf("failed to open virtual disk: %w", err) } return handle, nil } // CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. -func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { +func CreateVirtualDisk( + path string, + virtualDiskAccessMask VirtualDiskAccessMask, + createVirtualDiskFlags CreateVirtualDiskFlag, + parameters *CreateVirtualDiskParameters, +) (syscall.Handle, error) { var ( handle syscall.Handle defaultType VirtualStorageType @@ -272,7 +324,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, nil, &handle, ); err != nil { - return handle, errors.Wrap(err, "failed to create virtual disk") + return handle, fmt.Errorf("failed to create virtual disk: %w", err) } return handle, nil } @@ -290,12 +342,14 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { &diskPathSizeInBytes, &diskPhysicalPathBuf[0], ); err != nil { - return "", errors.Wrap(err, "failed to get disk physical path") + return "", fmt.Errorf("failed to get disk physical path: %w", err) } return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil } // CreateDiffVhd is a helper function to create a differencing virtual disk. +// +//revive:disable-next-line:var-naming VHD, not Vhd func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { // Setting `ParentPath` is how to signal to create a differencing disk. 
createParams := &CreateVirtualDiskParameters{ @@ -314,10 +368,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error createParams, ) if err != nil { - return fmt.Errorf("failed to create differencing vhd: %s", err) + return fmt.Errorf("failed to create differencing vhd: %w", err) } if err := syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %s", err) + return fmt.Errorf("failed to close differencing vhd handle: %w", err) } return nil } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index 572f7b42f105..d0e917d2be37 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package vhd @@ -47,60 +49,60 @@ var ( procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") ) -func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) - if r1 != 0 { - err = errnoErr(e1) +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { + r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { return } return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) } -func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle 
*syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - err = errnoErr(e1) +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) - if r1 != 0 { - err = errnoErr(e1) +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) - if r1 != 0 { - err = errnoErr(e1) +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { return } return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) } -func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), 
uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - err = errnoErr(e1) +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 176ff75e320c..83f45a1351ba 100644 --- a/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/cluster-autoscaler/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package winio @@ -47,9 +49,11 @@ var ( procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") @@ -74,7 +78,6 @@ var ( procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -123,6 +126,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision return } +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func getSecurityDescriptorLength(sd uintptr) (len uint32) { r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) len = uint32(r0) @@ -154,6 +165,14 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS return } +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, 
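The regenerated wrappers in this file and the vhd one above follow mkwinsyscall's two error conventions: procedures that return a Win32 error code directly surface it as `syscall.Errno(r0)` when `r0 != 0`, while BOOL-returning procedures report failure as a zero return plus `GetLastError`, folded through `errnoErr`. A schematic comparison with hypothetical procedures, not real generated output:

```go
//go:build windows

package sketch

import "syscall"

var (
	mod             = syscall.NewLazyDLL("example.dll") // hypothetical DLL
	procReturnsCode = mod.NewProc("ReturnsErrorCode")   // hypothetical export
	procReturnsBool = mod.NewProc("ReturnsBool")        // hypothetical export
)

// errnoErr approximates the generated helper: never hand back a
// zero Errno for a call that reported failure.
func errnoErr(e syscall.Errno) error {
	if e == 0 {
		return syscall.EINVAL
	}
	return e
}

// Error-code convention: the return value is the Win32 error itself.
func callReturningErrorCode() (win32err error) {
	r0, _, _ := syscall.Syscall(procReturnsCode.Addr(), 0, 0, 0, 0)
	if r0 != 0 {
		win32err = syscall.Errno(r0)
	}
	return
}

// BOOL convention: zero means failure; details come from GetLastError (e1).
func callReturningBool() (err error) {
	r1, _, e1 := syscall.Syscall(procReturnsBool.Addr(), 0, 0, 0, 0)
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}
```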
sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(systemName) @@ -380,25 +399,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlNtStatusToDosError(status ntstatus) (winerr error) { +func rtlNtStatusToDosError(status ntStatus) (winerr error) { r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) if r0 != 0 { winerr = syscall.Errno(r0) @@ -417,11 +436,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint } return } - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go deleted file mode 100644 index eefd88d8562e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go +++ /dev/null @@ -1,304 +0,0 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, 
which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). -package hcn - -import ( - "encoding/json" - "fmt" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go - -/// HNS V1 API - -//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId -//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? - -/// HCN V2 API - -// Network -//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks? -//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork? -//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork? -//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork? -//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties? -//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork? -//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork? - -// Endpoint -//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints? -//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint? -//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint? -//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint? -//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties? -//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint? -//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint? - -// Namespace -//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces? -//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace? -//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace? -//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace? -//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties? -//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace? -//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace? - -// LoadBalancer -//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers? 
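Each `//sys` directive feeds the mkwinsyscall generator, and the trailing `?` marks procedures that may be absent on older Windows builds: the generated wrapper probes the export first and returns the load error instead of panicking at call time. A hedged sketch of the shape such a wrapper takes, with parameters elided; this is not actual generated output:

```go
//go:build windows

package sketch

import "syscall"

var (
	modcomputenetwork        = syscall.NewLazyDLL("computenetwork.dll")
	procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks")
)

// hcnEnumerateNetworks sketches what the "?" suffix buys: Find() reports a
// missing export as an ordinary error instead of a panic at call time.
func hcnEnumerateNetworks() (hr error) {
	if err := procHcnEnumerateNetworks.Find(); err != nil {
		return err // e.g. running on a build without the HCN V2 API
	}
	// The real wrapper would now invoke the procedure:
	// syscall.SyscallN(procHcnEnumerateNetworks.Addr(), ...)
	return nil
}
```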
-//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer? -//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer? -//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer? -//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties? -//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? -//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? - -// SDN Routes -//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes? -//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute? -//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute? -//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute? -//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties? -//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute? -//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute? - -type _guid = guid.GUID - -type hcnNetwork syscall.Handle -type hcnEndpoint syscall.Handle -type hcnNamespace syscall.Handle -type hcnLoadBalancer syscall.Handle -type hcnRoute syscall.Handle - -// SchemaVersion for HCN Objects/Queries. -type SchemaVersion = Version // hcnglobals.go - -// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which -// properties of an object are returned. -type HostComputeQueryFlags uint32 - -var ( - // HostComputeQueryFlagsNone returns an object with the standard properties. - HostComputeQueryFlagsNone HostComputeQueryFlags - // HostComputeQueryFlagsDetailed returns an object with all properties. - HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1 -) - -// HostComputeQuery is the format for HCN queries. -type HostComputeQuery struct { - SchemaVersion SchemaVersion `json:""` - Flags HostComputeQueryFlags `json:",omitempty"` - Filter string `json:",omitempty"` -} - -type ExtraParams struct { - Resources json.RawMessage `json:",omitempty"` - SharedContainers json.RawMessage `json:",omitempty"` - LayeredOn string `json:",omitempty"` - SwitchGuid string `json:",omitempty"` - UtilityVM string `json:",omitempty"` - VirtualMachine string `json:",omitempty"` -} - -type Health struct { - Data interface{} `json:",omitempty"` - Extra ExtraParams `json:",omitempty"` -} - -// defaultQuery generates HCN Query. -// Passed into get/enumerate calls to filter results. 
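`HostComputeQuery` above is the JSON envelope every get/enumerate call receives; `Filter` carries a nested JSON object matched against object properties. A sketch of how the deleted lookup helpers assemble one, using trimmed local copies of the types and a placeholder ID:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed local copies of the vendored types (same JSON shape).
type SchemaVersion struct {
	Major int `json:"Major"`
	Minor int `json:"Minor"`
}

type HostComputeQuery struct {
	SchemaVersion SchemaVersion `json:""`
	Flags         uint32        `json:",omitempty"`
	Filter        string        `json:",omitempty"`
}

func main() {
	// Filter by ID, as the deleted lookup helpers do; the ID is a placeholder.
	filter, _ := json.Marshal(map[string]string{"ID": "demo-endpoint-id"})
	q := HostComputeQuery{
		SchemaVersion: SchemaVersion{Major: 2, Minor: 0},
		Filter:        string(filter),
	}
	out, _ := json.Marshal(q)
	fmt.Println(string(out))
}
```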
-func defaultQuery() HostComputeQuery { - query := HostComputeQuery{ - SchemaVersion: SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: HostComputeQueryFlagsNone, - } - return query -} - -// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS -func platformDoesNotSupportError(featureName string) error { - return fmt.Errorf("Platform does not support feature %s", featureName) -} - -// V2ApiSupported returns an error if the HCN version does not support the V2 Apis. -func V2ApiSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.Api.V2 { - return nil - } - return platformDoesNotSupportError("V2 Api/Schema") -} - -func V2SchemaVersion() SchemaVersion { - return SchemaVersion{ - Major: 2, - Minor: 0, - } -} - -// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. -func RemoteSubnetSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.RemoteSubnet { - return nil - } - return platformDoesNotSupportError("Remote Subnet") -} - -// HostRouteSupported returns an error if the HCN version does not support Host Route policies. -func HostRouteSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.HostRoute { - return nil - } - return platformDoesNotSupportError("Host Route") -} - -// DSRSupported returns an error if the HCN version does not support Direct Server Return. -func DSRSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.DSR { - return nil - } - return platformDoesNotSupportError("Direct Server Return (DSR)") -} - -// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. -func Slash32EndpointPrefixesSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.Slash32EndpointPrefixes { - return nil - } - return platformDoesNotSupportError("Slash 32 Endpoint prefixes") -} - -// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. -func AclSupportForProtocol252Supported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.AclSupportForProtocol252 { - return nil - } - return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN") -} - -// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. -func SessionAffinitySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.SessionAffinity { - return nil - } - return platformDoesNotSupportError("Session Affinity") -} - -// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. 
-func IPv6DualStackSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.IPv6DualStack { - return nil - } - return platformDoesNotSupportError("IPv6 DualStack") -} - -//L4proxySupported returns an error if the HCN verison does not support L4Proxy -func L4proxyPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.L4Proxy { - return nil - } - return platformDoesNotSupportError("L4ProxyPolicy") -} - -// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy -func L4WfpProxyPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.L4WfpProxy { - return nil - } - return platformDoesNotSupportError("L4WfpProxyPolicy") -} - -// SetPolicySupported returns an error if the HCN version does not support SetPolicy. -func SetPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.SetPolicy { - return nil - } - return platformDoesNotSupportError("SetPolicy") -} - -// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. -func VxlanPortSupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.VxlanPort { - return nil - } - return platformDoesNotSupportError("VXLAN port configuration") -} - -// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl. -func TierAclPolicySupported() error { - supported, err := GetCachedSupportedFeatures() - if err != nil { - return err - } - if supported.TierAcl { - return nil - } - return platformDoesNotSupportError("TierAcl") -} - -// RequestType are the different operations performed to settings. -// Used to update the settings of Endpoint/Namespace objects. -type RequestType string - -var ( - // RequestTypeAdd adds the provided settings object. - RequestTypeAdd RequestType = "Add" - // RequestTypeRemove removes the provided settings object. - RequestTypeRemove RequestType = "Remove" - // RequestTypeUpdate replaces settings with the ones provided. - RequestTypeUpdate RequestType = "Update" - // RequestTypeRefresh refreshes the settings provided. - RequestTypeRefresh RequestType = "Refresh" -) diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go deleted file mode 100644 index 545e8639d6cf..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go +++ /dev/null @@ -1,388 +0,0 @@ -package hcn - -import ( - "encoding/json" - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// IpConfig is assoicated with an endpoint -type IpConfig struct { - IpAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` -} - -// EndpointFlags are special settings on an endpoint. -type EndpointFlags uint32 - -var ( - // EndpointFlagsNone is the default. - EndpointFlagsNone EndpointFlags - // EndpointFlagsRemoteEndpoint means that an endpoint is on another host. 
- EndpointFlagsRemoteEndpoint EndpointFlags = 1 -) - -// HostComputeEndpoint represents a network endpoint -type HostComputeEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - HostComputeNetwork string `json:",omitempty"` // GUID - HostComputeNamespace string `json:",omitempty"` // GUID - Policies []EndpointPolicy `json:",omitempty"` - IpConfigurations []IpConfig `json:",omitempty"` - Dns Dns `json:",omitempty"` - Routes []Route `json:",omitempty"` - MacAddress string `json:",omitempty"` - Flags EndpointFlags `json:",omitempty"` - Health Health `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` -} - -// EndpointResourceType are the two different Endpoint settings resources. -type EndpointResourceType string - -var ( - // EndpointResourceTypePolicy is for Endpoint Policies. Ex: ACL, NAT - EndpointResourceTypePolicy EndpointResourceType = "Policy" - // EndpointResourceTypePort is for Endpoint Port settings. - EndpointResourceTypePort EndpointResourceType = "Port" -) - -// ModifyEndpointSettingRequest is the structure used to send request to modify an endpoint. -// Used to update policy/port on an endpoint. -type ModifyEndpointSettingRequest struct { - ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port - RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh - Settings json.RawMessage `json:",omitempty"` -} - -// VmEndpointRequest creates a switch port with identifier `PortId`. -type VmEndpointRequest struct { - PortId guid.GUID `json:",omitempty"` - VirtualNicName string `json:",omitempty"` - VirtualMachineId guid.GUID `json:",omitempty"` -} - -type PolicyEndpointRequest struct { - Policies []EndpointPolicy `json:",omitempty"` -} - -func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) { - // Open endpoint. - var ( - endpointHandle hcnEndpoint - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. 
- hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { - // Enumerate all Endpoint Guids - var ( - resultBuffer *uint16 - endpointBuffer *uint16 - ) - hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil { - return nil, err - } - - endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer) - var endpointIds []guid.GUID - err := json.Unmarshal([]byte(endpoints), &endpointIds) - if err != nil { - return nil, err - } - - var outputEndpoints []HostComputeEndpoint - for _, endpointGuid := range endpointIds { - endpoint, err := getEndpoint(endpointGuid, query) - if err != nil { - return nil, err - } - outputEndpoints = append(outputEndpoints, *endpoint) - } - return outputEndpoints, nil -} - -func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return nil, errInvalidNetworkID - } - // Open network. - var networkHandle hcnNetwork - var resultBuffer *uint16 - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Create endpoint. - endpointId := guid.GUID{} - var endpointHandle hcnEndpoint - hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - var propertiesBuffer *uint16 - hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. - hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { - endpointGuid, err := guid.FromString(endpointId) - if err != nil { - return nil, errInvalidEndpointID - } - // Open endpoint - var ( - endpointHandle hcnEndpoint - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) - if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Modify endpoint - hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil { - return nil, err - } - // Query endpoint. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close endpoint. - hr = hcnCloseEndpoint(endpointHandle) - if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeEndpoint - var outputEndpoint HostComputeEndpoint - if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { - return nil, err - } - return &outputEndpoint, nil -} - -func deleteEndpoint(endpointId string) error { - endpointGuid, err := guid.FromString(endpointId) - if err != nil { - return errInvalidEndpointID - } - var resultBuffer *uint16 - hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListEndpoints makes a call to list all available endpoints. -func ListEndpoints() ([]HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - return endpoints, nil -} - -// ListEndpointsQuery makes a call to query the list of available endpoints. -func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - endpoints, err := enumerateEndpoints(string(queryJson)) - if err != nil { - return nil, err - } - return endpoints, nil -} - -// ListEndpointsOfNetwork queries the list of endpoints on a network. -func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId} - mapA := map[string]string{"VirtualNetwork": networkId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - return ListEndpointsQuery(hcnQuery) -} - -// GetEndpointByID returns an endpoint specified by Id -func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": endpointId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointID: endpointId} - } - return &endpoints[0], err -} - -// GetEndpointByName returns an endpoint specified by Name -func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"Name": endpointName} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - endpoints, err := ListEndpointsQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointName: endpointName} - } - return &endpoints[0], err -} - -// Create Endpoint. 
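`getEndpoint`, `createEndpoint`, and `modifyEndpoint` above all repeat one flow: open a handle, issue the query, convert and free the result buffer, close the handle, then unmarshal the JSON. A condensed sketch with hypothetical stand-ins for the generated shims; note the explicit close mirrors the vendored code, which surfaces close errors rather than deferring:

```go
package main

import "fmt"

type handle int

// Hypothetical stand-ins for hcnOpenEndpoint / hcnQueryEndpointProperties /
// hcnCloseEndpoint; the real calls go through the generated syscall shims
// and return JSON in a CoTaskMem buffer that must be converted and freed.
func openRes() (handle, error)          { return 1, nil }
func queryRes(h handle) (string, error) { return `{"ID":"demo"}`, nil }
func closeRes(h handle) error           { return nil }

// getThing condenses the deleted getEndpoint flow: open, query, close,
// then hand the JSON to the caller for unmarshaling.
func getThing() (string, error) {
	h, err := openRes()
	if err != nil {
		return "", err
	}
	props, err := queryRes(h)
	if err != nil {
		return "", err
	}
	if err := closeRes(h); err != nil {
		return "", err
	}
	return props, nil
}

func main() {
	props, err := getThing()
	fmt.Println(props, err)
}
```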
-func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { - logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) - - if endpoint.HostComputeNamespace != "" { - return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") - } - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString) - endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return endpoint, nil -} - -// Delete Endpoint. -func (endpoint *HostComputeEndpoint) Delete() error { - logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id) - - if err := deleteEndpoint(endpoint.Id); err != nil { - return err - } - return nil -} - -// ModifyEndpointSettings updates the Port/Policy of an Endpoint. -func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId) - - endpointSettingsRequest, err := json.Marshal(request) - if err != nil { - return err - } - - _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest)) - if err != nil { - return err - } - return nil -} - -// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint. -func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) - - settingsJson, err := json.Marshal(endpointPolicy) - if err != nil { - return err - } - requestMessage := &ModifyEndpointSettingRequest{ - ResourceType: EndpointResourceTypePolicy, - RequestType: requestType, - Settings: settingsJson, - } - - return ModifyEndpointSettings(endpoint.Id, requestMessage) -} - -// NamespaceAttach modifies a Namespace to add an endpoint. -func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error { - return AddNamespaceEndpoint(namespaceId, endpoint.Id) -} - -// NamespaceDetach modifies a Namespace to remove an endpoint. -func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error { - return RemoveNamespaceEndpoint(namespaceId, endpoint.Id) -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go deleted file mode 100644 index ad30d320d97e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). 
-package hcn - -import ( - "errors" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -var ( - errInvalidNetworkID = errors.New("invalid network ID") - errInvalidEndpointID = errors.New("invalid endpoint ID") - errInvalidNamespaceID = errors.New("invalid namespace ID") - errInvalidLoadBalancerID = errors.New("invalid load balancer ID") - errInvalidRouteID = errors.New("invalid route ID") -) - -func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { - errorFound := false - - if hr != nil { - errorFound = true - } - - result := "" - if resultBuffer != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultBuffer) - if result != "" { - errorFound = true - } - } - - if errorFound { - returnError := new(hr, methodName, result) - logrus.Debugf(returnError.Error()) // HCN errors logged for debugging. - return returnError - } - - return nil -} - -type ErrorCode uint32 - -// For common errors, define the error as it is in windows, so we can quickly determine it later -const ( - ERROR_NOT_FOUND = 0x490 - HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 -) - -type HcnError struct { - *hcserror.HcsError - code ErrorCode -} - -func (e *HcnError) Error() string { - return e.HcsError.Error() -} - -func CheckErrorWithCode(err error, code ErrorCode) bool { - hcnError, ok := err.(*HcnError) - if ok { - return hcnError.code == code - } - return false -} - -func IsElementNotFoundError(err error) bool { - return CheckErrorWithCode(err, ERROR_NOT_FOUND) -} - -func IsPortAlreadyExistsError(err error) bool { - return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS) -} - -func new(hr error, title string, rest string) error { - err := &HcnError{} - hcsError := hcserror.New(hr, title, rest) - err.HcsError = hcsError.(*hcserror.HcsError) - err.code = ErrorCode(hcserror.Win32FromError(hr)) - return err -} - -// -// Note that the below errors are not errors returned by hcn itself -// we wish to seperate them as they are shim usage error -// - -// NetworkNotFoundError results from a failed seach for a network by Id or Name -type NetworkNotFoundError struct { - NetworkName string - NetworkID string -} - -func (e NetworkNotFoundError) Error() string { - if e.NetworkName != "" { - return fmt.Sprintf("Network name %q not found", e.NetworkName) - } - return fmt.Sprintf("Network ID %q not found", e.NetworkID) -} - -// EndpointNotFoundError results from a failed seach for an endpoint by Id or Name -type EndpointNotFoundError struct { - EndpointName string - EndpointID string -} - -func (e EndpointNotFoundError) Error() string { - if e.EndpointName != "" { - return fmt.Sprintf("Endpoint name %q not found", e.EndpointName) - } - return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) -} - -// NamespaceNotFoundError results from a failed seach for a namsepace by Id -type NamespaceNotFoundError struct { - NamespaceID string -} - -func (e NamespaceNotFoundError) Error() string { - return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID) -} - -// LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id -type LoadBalancerNotFoundError struct { - LoadBalancerId string -} - -func (e LoadBalancerNotFoundError) Error() string { - return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) -} - -// RouteNotFoundError results from a failed seach for a route by Id -type RouteNotFoundError struct { - 
RouteId string -} - -func (e RouteNotFoundError) Error() string { - return fmt.Sprintf("SDN Route %q not found", e.RouteId) -} - -// IsNotFoundError returns a boolean indicating whether the error was caused by -// a resource not being found. -func IsNotFoundError(err error) bool { - switch pe := err.(type) { - case NetworkNotFoundError: - return true - case EndpointNotFoundError: - return true - case NamespaceNotFoundError: - return true - case LoadBalancerNotFoundError: - return true - case RouteNotFoundError: - return true - case *hcserror.HcsError: - return pe.Err == hcs.ErrElementNotFound - } - return false -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go deleted file mode 100644 index d03c48736da1..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go +++ /dev/null @@ -1,132 +0,0 @@ -package hcn - -import ( - "encoding/json" - "fmt" - "math" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// Globals are all global properties of the HCN Service. -type Globals struct { - Version Version `json:"Version"` -} - -// Version is the HCN Service version. -type Version struct { - Major int `json:"Major"` - Minor int `json:"Minor"` -} - -type VersionRange struct { - MinVersion Version - MaxVersion Version -} - -type VersionRanges []VersionRange - -var ( - // HNSVersion1803 added ACL functionality. - HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // V2ApiSupport allows the use of V2 Api calls and V2 Schema. 
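The `*NotFoundError` types above are plain value types, and `IsNotFoundError` classifies them with a type switch rather than sentinel comparison. A compact sketch of the same pattern, with a trimmed local copy of one error type:

```go
package main

import "fmt"

// Hypothetical mirror of one of the deleted not-found error types.
type EndpointNotFoundError struct{ EndpointID string }

func (e EndpointNotFoundError) Error() string {
	return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID)
}

// isNotFound classifies by concrete type, as the deleted IsNotFoundError does.
func isNotFound(err error) bool {
	switch err.(type) {
	case EndpointNotFoundError:
		return true
	}
	return false
}

func main() {
	var err error = EndpointNotFoundError{EndpointID: "demo-id"}
	fmt.Println(isNotFound(err), err) // true Endpoint ID "demo-id" not found
}
```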
- V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // Remote Subnet allows for Remote Subnet policies on Overlay networks - RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // A Host Route policy allows for local container to local host communication Overlay networks - HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing - DSRVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 9.3 through 10.0 (not included) and, 10.4+ provide support for configuring endpoints with /32 prefixes - Slash32EndpointPrefixesVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 9.3 through 10.0 (not included) and, 10.4+ allow for HNS ACL Policies to support protocol 252 for VXLAN - AclSupportForProtocol252Version = VersionRanges{ - VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 12.0 allows for session affinity for loadbalancing - SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 11.10+ supports Ipv6 dual stack. 
- IPv6DualStackVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - // HNS 13.0 allows for Set Policy support - SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - // HNS 10.3 allows for VXLAN ports - VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 9.5 through 10.0(not included), 10.5 through 11.0(not included), 11.11 through 12.0(not included), 12.1 through 13.0(not included), 13.1+ allows for Network L4Proxy Policy support - L4ProxyPolicyVersion = VersionRanges{ - VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}}, - VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, - } - - //HNS 13.2 allows for L4WfpProxy Policy support - L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} - - //HNS 14.0 allows for TierAcl Policy support - TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} -) - -// GetGlobals returns the global properties of the HCN Service. 
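Each feature above maps to a `VersionRanges` table with deliberate holes, e.g. DSR skips HNS 10.0 and 10.1. The membership check itself is not part of this diff; a hypothetical version of it, exercising the DSR table:

```go
package main

import (
	"fmt"
	"math"
)

type Version struct{ Major, Minor int }

type VersionRange struct{ MinVersion, MaxVersion Version }

type VersionRanges []VersionRange

// atLeast reports whether a >= b in (Major, Minor) lexicographic order.
func atLeast(a, b Version) bool {
	return a.Major > b.Major || (a.Major == b.Major && a.Minor >= b.Minor)
}

// inRanges is a hypothetical membership check over the deleted
// VersionRanges tables (the vendored checker is not shown in the diff).
func inRanges(v Version, ranges VersionRanges) bool {
	for _, r := range ranges {
		if atLeast(v, r.MinVersion) && atLeast(r.MaxVersion, v) {
			return true
		}
	}
	return false
}

func main() {
	dsr := VersionRanges{
		{Version{9, 3}, Version{9, math.MaxInt32}},
		{Version{10, 2}, Version{math.MaxInt32, math.MaxInt32}},
	}
	fmt.Println(inRanges(Version{10, 0}, dsr)) // false: 10.0 sits in the gap
	fmt.Println(inRanges(Version{10, 3}, dsr)) // true
}
```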
-func GetGlobals() (*Globals, error) { - var version Version - err := hnsCall("GET", "/globals/version", "", &version) - if err != nil { - return nil, err - } - - globals := &Globals{ - Version: version, - } - - return globals, nil -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - var responseBuffer *uint16 - logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) - - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return hcserror.New(err, "hnsCall ", "") - } - response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return err - } - - if !hnsresponse.Success { - return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) - } - - if len(hnsresponse.Output) == 0 { - return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go deleted file mode 100644 index 1b434b07b3ad..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go +++ /dev/null @@ -1,311 +0,0 @@ -package hcn - -import ( - "encoding/json" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -// LoadBalancerPortMapping is associated with HostComputeLoadBalancer -type LoadBalancerPortMapping struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - DistributionType LoadBalancerDistribution `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2 - Flags LoadBalancerPortMappingFlags `json:",omitempty"` -} - -// HostComputeLoadBalancer represents software load balancer. -type HostComputeLoadBalancer struct { - Id string `json:"ID,omitempty"` - HostComputeEndpoints []string `json:",omitempty"` - SourceVIP string `json:",omitempty"` - FrontendVIPs []string `json:",omitempty"` - PortMappings []LoadBalancerPortMapping `json:",omitempty"` - SchemaVersion SchemaVersion `json:",omitempty"` - Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn -} - -//LoadBalancerFlags modify settings for a loadbalancer. -type LoadBalancerFlags uint32 - -var ( - // LoadBalancerFlagsNone is the default. - LoadBalancerFlagsNone LoadBalancerFlags = 0 - // LoadBalancerFlagsDSR enables Direct Server Return (DSR) - LoadBalancerFlagsDSR LoadBalancerFlags = 1 - LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 -) - -// LoadBalancerPortMappingFlags are special settings on a loadbalancer. -type LoadBalancerPortMappingFlags uint32 - -var ( - // LoadBalancerPortMappingFlagsNone is the default. - LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags - // LoadBalancerPortMappingFlagsILB enables internal loadbalancing. - LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1 - // LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host. 
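`hnsCall` above funnels every HNS V1 request through one JSON envelope: the service answers with `{Success, Error, Output}`, and `Output` is re-unmarshaled into the caller's typed value. The decode step in isolation, with the transport faked:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type hnsResponse struct {
	Success bool
	Error   string
	Output  json.RawMessage
}

// decode mirrors the tail of the deleted hnsCall: check Success, then
// unmarshal Output into the caller-supplied value.
func decode(raw string, into interface{}) error {
	var resp hnsResponse
	if err := json.Unmarshal([]byte(raw), &resp); err != nil {
		return err
	}
	if !resp.Success {
		return fmt.Errorf("HNS failed with error : %s", resp.Error)
	}
	if len(resp.Output) == 0 {
		return nil
	}
	return json.Unmarshal(resp.Output, into)
}

func main() {
	var v struct{ Major, Minor int }
	raw := `{"Success":true,"Output":{"Major":9,"Minor":2}}`
	fmt.Println(decode(raw, &v), v) // <nil> {9 2}
}
```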
- LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2 - // LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP. - LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4 - // LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP. - LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 -) - -// LoadBalancerDistribution specifies how the loadbalancer distributes traffic. -type LoadBalancerDistribution uint32 - -var ( - // LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod. - LoadBalancerDistributionNone LoadBalancerDistribution - // LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod. - LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1 - // LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod. - LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 -) - -func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { - // Open loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. - hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeLoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { - // Enumerate all LoadBalancer Guids - var ( - resultBuffer *uint16 - loadBalancerBuffer *uint16 - ) - hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil { - return nil, err - } - - loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer) - var loadBalancerIds []guid.GUID - if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil { - return nil, err - } - - var outputLoadBalancers []HostComputeLoadBalancer - for _, loadBalancerGuid := range loadBalancerIds { - loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) - if err != nil { - return nil, err - } - outputLoadBalancers = append(outputLoadBalancers, *loadBalancer) - } - return outputLoadBalancers, nil -} - -func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { - // Create new loadBalancer. - var ( - loadBalancerHandle hcnLoadBalancer - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - loadBalancerGuid := guid.GUID{} - hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) - if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { - return nil, err - } - // Query loadBalancer. 
- hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close loadBalancer. - hr = hcnCloseLoadBalancer(loadBalancerHandle) - if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeLoadBalancer - var outputLoadBalancer HostComputeLoadBalancer - if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { - return nil, err - } - return &outputLoadBalancer, nil -} - -func deleteLoadBalancer(loadBalancerId string) error { - loadBalancerGuid, err := guid.FromString(loadBalancerId) - if err != nil { - return errInvalidLoadBalancerID - } - var resultBuffer *uint16 - hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListLoadBalancers makes a call to list all available loadBalancers. -func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { - hcnQuery := defaultQuery() - loadBalancers, err := ListLoadBalancersQuery(hcnQuery) - if err != nil { - return nil, err - } - return loadBalancers, nil -} - -// ListLoadBalancersQuery makes a call to query the list of available loadBalancers. -func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - loadBalancers, err := enumerateLoadBalancers(string(queryJson)) - if err != nil { - return nil, err - } - return loadBalancers, nil -} - -// GetLoadBalancerByID returns the LoadBalancer specified by Id. -func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": loadBalancerId} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - loadBalancers, err := ListLoadBalancersQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(loadBalancers) == 0 { - return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} - } - return &loadBalancers[0], err -} - -// Create LoadBalancer. -func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id) - - jsonString, err := json.Marshal(loadBalancer) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString) - loadBalancer, hcnErr := createLoadBalancer(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return loadBalancer, nil -} - -// Delete LoadBalancer. 
-func (loadBalancer *HostComputeLoadBalancer) Delete() error { - logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id) - - if err := deleteLoadBalancer(loadBalancer.Id); err != nil { - return err - } - return nil -} - -// AddEndpoint add an endpoint to a LoadBalancer -func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) - - err := loadBalancer.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) - - return loadBalancer.Create() -} - -// RemoveEndpoint removes an endpoint from a LoadBalancer -func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) - - err := loadBalancer.Delete() - if err != nil { - return nil, err - } - - // Create a list of all the endpoints besides the one being removed - var endpoints []string - for _, endpointReference := range loadBalancer.HostComputeEndpoints { - if endpointReference == endpoint.Id { - continue - } - endpoints = append(endpoints, endpointReference) - } - loadBalancer.HostComputeEndpoints = endpoints - return loadBalancer.Create() -} - -// AddLoadBalancer for the specified endpoints -func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) { - logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort) - - loadBalancer := &HostComputeLoadBalancer{ - SourceVIP: sourceVIP, - PortMappings: []LoadBalancerPortMapping{ - { - Protocol: uint32(protocol), - InternalPort: internalPort, - ExternalPort: externalPort, - Flags: portMappingFlags, - }, - }, - FrontendVIPs: frontendVIPs, - SchemaVersion: SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: flags, - } - - for _, endpoint := range endpoints { - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) - } - - return loadBalancer.Create() -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go deleted file mode 100644 index d2ef2296099a..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go +++ /dev/null @@ -1,446 +0,0 @@ -package hcn - -import ( - "encoding/json" - "os" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - icni "github.com/Microsoft/hcsshim/internal/cni" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/regstate" - "github.com/Microsoft/hcsshim/internal/runhcs" - "github.com/sirupsen/logrus" -) - -// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace. 
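`AddEndpoint` and `RemoveEndpoint` above cannot edit a load balancer in place: they delete the object, adjust `HostComputeEndpoints`, and recreate it. The slice surgery on its own, with placeholder IDs:

```go
package main

import "fmt"

// removeID mirrors the deleted RemoveEndpoint loop: rebuild the slice
// without the matching ID before the load balancer is recreated.
func removeID(ids []string, drop string) []string {
	var out []string
	for _, id := range ids {
		if id == drop {
			continue
		}
		out = append(out, id)
	}
	return out
}

func main() {
	eps := []string{"ep-a", "ep-b", "ep-c"}
	fmt.Println(removeID(eps, "ep-b")) // [ep-a ep-c]
}
```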
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go
deleted file mode 100644
index d2ef2296099a..000000000000
--- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go
+++ /dev/null
@@ -1,446 +0,0 @@
-package hcn
-
-import (
- "encoding/json"
- "os"
- "syscall"
-
- "github.com/Microsoft/go-winio/pkg/guid"
- icni "github.com/Microsoft/hcsshim/internal/cni"
- "github.com/Microsoft/hcsshim/internal/interop"
- "github.com/Microsoft/hcsshim/internal/regstate"
- "github.com/Microsoft/hcsshim/internal/runhcs"
- "github.com/sirupsen/logrus"
-)
-
-// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace.
-type NamespaceResourceEndpoint struct {
- Id string `json:"ID,"`
-}
-
-// NamespaceResourceContainer represents a Container attached to a Namespace.
-type NamespaceResourceContainer struct {
- Id string `json:"ID,"`
-}
-
-// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint.
-type NamespaceResourceType string
-
-var (
- // NamespaceResourceTypeContainer are containers associated with a Namespace.
- NamespaceResourceTypeContainer NamespaceResourceType = "Container"
- // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace.
- NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint"
-)
-
-// NamespaceResource is associated with a namespace
-type NamespaceResource struct {
- Type NamespaceResourceType `json:","` // Container, Endpoint
- Data json.RawMessage `json:","`
-}
-
-// NamespaceType determines whether the Namespace is for a Host or Guest
-type NamespaceType string
-
-var (
- // NamespaceTypeHost are host namespaces.
- NamespaceTypeHost NamespaceType = "Host"
- // NamespaceTypeHostDefault are host namespaces in the default compartment.
- NamespaceTypeHostDefault NamespaceType = "HostDefault"
- // NamespaceTypeGuest are guest namespaces.
- NamespaceTypeGuest NamespaceType = "Guest"
- // NamespaceTypeGuestDefault are guest namespaces in the default compartment.
- NamespaceTypeGuestDefault NamespaceType = "GuestDefault"
-)
-
-// HostComputeNamespace represents a namespace (AKA compartment) in the Host Compute Networking (HCN) service.
-type HostComputeNamespace struct {
- Id string `json:"ID,omitempty"`
- NamespaceId uint32 `json:",omitempty"`
- Type NamespaceType `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault
- Resources []NamespaceResource `json:",omitempty"`
- SchemaVersion SchemaVersion `json:",omitempty"`
-}
-
-// ModifyNamespaceSettingRequest is the structure used to send a request to modify a namespace.
-// Used to add/remove endpoints and containers to/from a namespace.
-type ModifyNamespaceSettingRequest struct {
- ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint
- RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh
- Settings json.RawMessage `json:",omitempty"`
-}
-
-func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) {
- // Open namespace.
- var (
- namespaceHandle hcnNamespace
- resultBuffer *uint16
- propertiesBuffer *uint16
- )
- hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer)
- if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil {
- return nil, err
- }
- // Query namespace.
- hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer)
- if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil {
- return nil, err
- }
- properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
- // Close namespace.
- hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNamespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { - // Enumerate all Namespace Guids - var ( - resultBuffer *uint16 - namespaceBuffer *uint16 - ) - hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil { - return nil, err - } - - namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer) - var namespaceIds []guid.GUID - if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil { - return nil, err - } - - var outputNamespaces []HostComputeNamespace - for _, namespaceGuid := range namespaceIds { - namespace, err := getNamespace(namespaceGuid, query) - if err != nil { - return nil, err - } - outputNamespaces = append(outputNamespaces, *namespace) - } - return outputNamespaces, nil -} - -func createNamespace(settings string) (*HostComputeNamespace, error) { - // Create new namespace. - var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - namespaceGuid := guid.GUID{} - hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. - hr = hcnCloseNamespace(namespaceHandle) - if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNamespace - var outputNamespace HostComputeNamespace - if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { - return nil, err - } - return &outputNamespace, nil -} - -func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { - namespaceGuid, err := guid.FromString(namespaceId) - if err != nil { - return nil, errInvalidNamespaceID - } - // Open namespace. - var ( - namespaceHandle hcnNamespace - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Modify namespace. - hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil { - return nil, err - } - // Query namespace. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close namespace. 
- hr = hcnCloseNamespace(namespaceHandle)
- if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil {
- return nil, err
- }
- // Convert output to Namespace
- var outputNamespace HostComputeNamespace
- if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil {
- return nil, err
- }
- return &outputNamespace, nil
-}
-
-func deleteNamespace(namespaceId string) error {
- namespaceGuid, err := guid.FromString(namespaceId)
- if err != nil {
- return errInvalidNamespaceID
- }
- var resultBuffer *uint16
- hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer)
- if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil {
- return err
- }
- return nil
-}
-
-// ListNamespaces makes a call to list all available namespaces.
-func ListNamespaces() ([]HostComputeNamespace, error) {
- hcnQuery := defaultQuery()
- namespaces, err := ListNamespacesQuery(hcnQuery)
- if err != nil {
- return nil, err
- }
- return namespaces, nil
-}
-
-// ListNamespacesQuery makes a call to query the list of available namespaces.
-func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) {
- queryJson, err := json.Marshal(query)
- if err != nil {
- return nil, err
- }
-
- namespaces, err := enumerateNamespaces(string(queryJson))
- if err != nil {
- return nil, err
- }
- return namespaces, nil
-}
-
-// GetNamespaceByID returns the Namespace specified by Id.
-func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) {
- hcnQuery := defaultQuery()
- mapA := map[string]string{"ID": namespaceId}
- filter, err := json.Marshal(mapA)
- if err != nil {
- return nil, err
- }
- hcnQuery.Filter = string(filter)
-
- namespaces, err := ListNamespacesQuery(hcnQuery)
- if err != nil {
- return nil, err
- }
- if len(namespaces) == 0 {
- return nil, NamespaceNotFoundError{NamespaceID: namespaceId}
- }
-
- return &namespaces[0], err
-}
-
-// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id.
-func GetNamespaceEndpointIds(namespaceId string) ([]string, error) {
- namespace, err := GetNamespaceByID(namespaceId)
- if err != nil {
- return nil, err
- }
- var endpointsIds []string
- for _, resource := range namespace.Resources {
- if resource.Type == "Endpoint" {
- var endpointResource NamespaceResourceEndpoint
- if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil {
- return nil, err
- }
- endpointsIds = append(endpointsIds, endpointResource.Id)
- }
- }
- return endpointsIds, nil
-}
-
-// GetNamespaceContainerIds returns the containers of the Namespace specified by Id.
-func GetNamespaceContainerIds(namespaceId string) ([]string, error) {
- namespace, err := GetNamespaceByID(namespaceId)
- if err != nil {
- return nil, err
- }
- var containerIds []string
- for _, resource := range namespace.Resources {
- if resource.Type == "Container" {
- var containerResource NamespaceResourceContainer
- if err := json.Unmarshal([]byte(resource.Data), &containerResource); err != nil {
- return nil, err
- }
- containerIds = append(containerIds, containerResource.Id)
- }
- }
- return containerIds, nil
-}
-
-// NewNamespace creates a new Namespace object
-func NewNamespace(nsType NamespaceType) *HostComputeNamespace {
- return &HostComputeNamespace{
- Type: nsType,
- SchemaVersion: V2SchemaVersion(),
- }
-}
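The accessors above decode a namespace's attached resources from raw JSON per resource type. A minimal sketch that drains a namespace using GetNamespaceEndpointIds together with RemoveNamespaceEndpoint (defined further down in this file); the namespace ID is supplied by the caller:

import "github.com/Microsoft/hcsshim/hcn"

// drainNamespace detaches every endpoint currently attached to a namespace.
func drainNamespace(namespaceID string) error {
	endpointIDs, err := hcn.GetNamespaceEndpointIds(namespaceID)
	if err != nil {
		return err
	}
	for _, endpointID := range endpointIDs {
		if err := hcn.RemoveNamespaceEndpoint(namespaceID, endpointID); err != nil {
			return err
		}
	}
	return nil
}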
-
-// Create Namespace.
-func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) {
- logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id)
-
- jsonString, err := json.Marshal(namespace)
- if err != nil {
- return nil, err
- }
-
- logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString)
- namespace, hcnErr := createNamespace(string(jsonString))
- if hcnErr != nil {
- return nil, hcnErr
- }
- return namespace, nil
-}
-
-// Delete Namespace.
-func (namespace *HostComputeNamespace) Delete() error {
- logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id)
-
- if err := deleteNamespace(namespace.Id); err != nil {
- return err
- }
- return nil
-}
-
-// Sync Namespace endpoints with the appropriate sandbox container holding the
-// network namespace open. If no sandbox container is found for this namespace,
-// this method is treated as a success and will not return an error in
-// this case. If the sandbox container is found and a sync is initiated, any
-// failures will be returned via this method.
-//
-// This call initiates a sync between endpoints and the matching UtilityVM
-// hosting those endpoints. It is safe to call for any `NamespaceType` but
-// `NamespaceTypeGuest` is the only case when a sync will actually occur. For
-// `NamespaceTypeHost` the process container will be automatically synchronized
-// when the endpoint is added via `AddNamespaceEndpoint`.
-//
-// Note: This method syncs both additions and removals of endpoints from a
-// `NamespaceTypeGuest` namespace.
-func (namespace *HostComputeNamespace) Sync() error {
- logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync")
-
- // We only attempt a sync for namespace guest.
- if namespace.Type != NamespaceTypeGuest {
- return nil
- }
-
- // Look in the registry for the key to map from namespace id to pod-id
- cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id)
- if err != nil {
- if regstate.IsNotFoundError(err) {
- return nil
- }
- return err
- }
- req := runhcs.VMRequest{
- ID: cfg.ContainerID,
- Op: runhcs.OpSyncNamespace,
- }
- shimPath := runhcs.VMPipePath(cfg.HostUniqueID)
- if err := runhcs.IssueVMRequest(shimPath, &req); err != nil {
- // The shim is likely gone. Simply ignore the sync as if it didn't exist.
- if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
- // Remove the reg key; there is no point in trying again
- _ = cfg.Remove()
- return nil
- }
- f := map[string]interface{}{
- "id": namespace.Id,
- "container-id": cfg.ContainerID,
- }
- logrus.WithFields(f).
- WithError(err).
- Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath)
- return err
- }
- return nil
-}
-
-// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace.
-func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error {
- logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId)
-
- namespaceSettings, err := json.Marshal(request)
- if err != nil {
- return err
- }
-
- _, err = modifyNamespace(namespaceId, string(namespaceSettings))
- if err != nil {
- return err
- }
- return nil
-}
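Sync only acts on guest namespaces: it looks up the persisted namespace-to-pod mapping and asks the owning shim's UtilityVM to reconcile endpoints. A minimal lifecycle sketch under that assumption:

import "github.com/Microsoft/hcsshim/hcn"

// newGuestNamespace creates a guest namespace and forces an endpoint sync.
func newGuestNamespace() (*hcn.HostComputeNamespace, error) {
	ns, err := hcn.NewNamespace(hcn.NamespaceTypeGuest).Create()
	if err != nil {
		return nil, err
	}
	// Sync is a no-op for every type other than NamespaceTypeGuest.
	if err := ns.Sync(); err != nil {
		return nil, err
	}
	return ns, nil
}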
-
-// AddNamespaceEndpoint adds an endpoint to a Namespace.
-func AddNamespaceEndpoint(namespaceId string, endpointId string) error {
- logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId)
-
- mapA := map[string]string{"EndpointId": endpointId}
- settingsJson, err := json.Marshal(mapA)
- if err != nil {
- return err
- }
- requestMessage := &ModifyNamespaceSettingRequest{
- ResourceType: NamespaceResourceTypeEndpoint,
- RequestType: RequestTypeAdd,
- Settings: settingsJson,
- }
-
- return ModifyNamespaceSettings(namespaceId, requestMessage)
-}
-
-// RemoveNamespaceEndpoint removes an endpoint from a Namespace.
-func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error {
- logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId)
-
- mapA := map[string]string{"EndpointId": endpointId}
- settingsJson, err := json.Marshal(mapA)
- if err != nil {
- return err
- }
- requestMessage := &ModifyNamespaceSettingRequest{
- ResourceType: NamespaceResourceTypeEndpoint,
- RequestType: RequestTypeRemove,
- Settings: settingsJson,
- }
-
- return ModifyNamespaceSettings(namespaceId, requestMessage)
-}
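Both helpers above are thin wrappers over ModifyNamespaceSettings, and the same request shape covers containers, for which no convenience wrapper is exposed here. A hedged sketch; the "ContainerId" payload key is an assumption made by analogy with the "EndpointId" payload above:

import (
	"encoding/json"

	"github.com/Microsoft/hcsshim/hcn"
)

// addNamespaceContainer attaches a container to a namespace via the generic request.
func addNamespaceContainer(namespaceID, containerID string) error {
	settings, err := json.Marshal(map[string]string{"ContainerId": containerID}) // key assumed
	if err != nil {
		return err
	}
	return hcn.ModifyNamespaceSettings(namespaceID, &hcn.ModifyNamespaceSettingRequest{
		ResourceType: hcn.NamespaceResourceTypeContainer,
		RequestType:  hcn.RequestTypeAdd,
		Settings:     settings,
	})
}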
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go
deleted file mode 100644
index c36b136387a9..000000000000
--- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go
+++ /dev/null
@@ -1,462 +0,0 @@
-package hcn
-
-import (
- "encoding/json"
- "errors"
-
- "github.com/Microsoft/go-winio/pkg/guid"
- "github.com/Microsoft/hcsshim/internal/interop"
- "github.com/sirupsen/logrus"
-)
-
-// Route is associated with a subnet.
-type Route struct {
- NextHop string `json:",omitempty"`
- DestinationPrefix string `json:",omitempty"`
- Metric uint16 `json:",omitempty"`
-}
-
-// Subnet is associated with a Ipam.
-type Subnet struct {
- IpAddressPrefix string `json:",omitempty"`
- Policies []json.RawMessage `json:",omitempty"`
- Routes []Route `json:",omitempty"`
-}
-
-// Ipam (Internet Protocol Address Management) is associated with a network
-// and represents the address space(s) of a network.
-type Ipam struct {
- Type string `json:",omitempty"` // Ex: Static, DHCP
- Subnets []Subnet `json:",omitempty"`
-}
-
-// MacRange is associated with MacPool and represents the start and end addresses.
-type MacRange struct {
- StartMacAddress string `json:",omitempty"`
- EndMacAddress string `json:",omitempty"`
-}
-
-// MacPool is associated with a network and represents pool of MacRanges.
-type MacPool struct {
- Ranges []MacRange `json:",omitempty"`
-}
-
-// Dns (Domain Name System) is associated with a network.
-type Dns struct {
- Domain string `json:",omitempty"`
- Search []string `json:",omitempty"`
- ServerList []string `json:",omitempty"`
- Options []string `json:",omitempty"`
-}
-
-// NetworkType are the various network types.
-type NetworkType string
-
-// NetworkType const
-const (
- NAT NetworkType = "NAT"
- Transparent NetworkType = "Transparent"
- L2Bridge NetworkType = "L2Bridge"
- L2Tunnel NetworkType = "L2Tunnel"
- ICS NetworkType = "ICS"
- Private NetworkType = "Private"
- Overlay NetworkType = "Overlay"
-)
-
-// NetworkFlags are various network flags.
-type NetworkFlags uint32
-
-// NetworkFlags const
-const (
- None NetworkFlags = 0
- EnableNonPersistent NetworkFlags = 8
-)
-
-// HostComputeNetwork represents a network
-type HostComputeNetwork struct {
- Id string `json:"ID,omitempty"`
- Name string `json:",omitempty"`
- Type NetworkType `json:",omitempty"`
- Policies []NetworkPolicy `json:",omitempty"`
- MacPool MacPool `json:",omitempty"`
- Dns Dns `json:",omitempty"`
- Ipams []Ipam `json:",omitempty"`
- Flags NetworkFlags `json:",omitempty"` // 0: None
- Health Health `json:",omitempty"`
- SchemaVersion SchemaVersion `json:",omitempty"`
-}
-
-// NetworkResourceType are the 3 different Network settings resources.
-type NetworkResourceType string
-
-var (
- // NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet
- NetworkResourceTypePolicy NetworkResourceType = "Policy"
- // NetworkResourceTypeDNS is for Network's DNS settings.
- NetworkResourceTypeDNS NetworkResourceType = "DNS"
- // NetworkResourceTypeExtension is for Network's extension settings.
- NetworkResourceTypeExtension NetworkResourceType = "Extension"
-)
-
-// ModifyNetworkSettingRequest is the structure used to send a request to modify a network.
-// Used to update DNS/extension/policy on a network.
-type ModifyNetworkSettingRequest struct {
- ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension
- RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh
- Settings json.RawMessage `json:",omitempty"`
-}
-
-type PolicyNetworkRequest struct {
- Policies []NetworkPolicy `json:",omitempty"`
-}
-
-func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) {
- // Open network.
- var (
- networkHandle hcnNetwork
- resultBuffer *uint16
- propertiesBuffer *uint16
- )
- hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer)
- if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil {
- return nil, err
- }
- // Query network.
- hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer)
- if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil {
- return nil, err
- }
- properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
- // Close network.
- hr = hcnCloseNetwork(networkHandle)
- if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
- return nil, err
- }
- // Convert output to HostComputeNetwork
- var outputNetwork HostComputeNetwork
-
- // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
- // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
- // unmarshaling the JSON blob.
- outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func enumerateNetworks(query string) ([]HostComputeNetwork, error) { - // Enumerate all Network Guids - var ( - resultBuffer *uint16 - networkBuffer *uint16 - ) - hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil { - return nil, err - } - - networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer) - var networkIds []guid.GUID - if err := json.Unmarshal([]byte(networks), &networkIds); err != nil { - return nil, err - } - - var outputNetworks []HostComputeNetwork - for _, networkGuid := range networkIds { - network, err := getNetwork(networkGuid, query) - if err != nil { - return nil, err - } - outputNetworks = append(outputNetworks, *network) - } - return outputNetworks, nil -} - -func createNetwork(settings string) (*HostComputeNetwork, error) { - // Create new network. - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - networkGuid := guid.GUID{} - hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. - hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return nil, errInvalidNetworkID - } - // Open Network - var ( - networkHandle hcnNetwork - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) - if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Modify Network - hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer) - if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil { - return nil, err - } - // Query network. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close network. 
- hr = hcnCloseNetwork(networkHandle) - if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeNetwork - var outputNetwork HostComputeNetwork - - // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), - // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before - // unmarshaling the JSON blob. - outputNetwork.Type = NAT - - if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { - return nil, err - } - return &outputNetwork, nil -} - -func deleteNetwork(networkId string) error { - networkGuid, err := guid.FromString(networkId) - if err != nil { - return errInvalidNetworkID - } - var resultBuffer *uint16 - hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) - if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { - return err - } - return nil -} - -// ListNetworks makes a call to list all available networks. -func ListNetworks() ([]HostComputeNetwork, error) { - hcnQuery := defaultQuery() - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - return networks, nil -} - -// ListNetworksQuery makes a call to query the list of available networks. -func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { - queryJson, err := json.Marshal(query) - if err != nil { - return nil, err - } - - networks, err := enumerateNetworks(string(queryJson)) - if err != nil { - return nil, err - } - return networks, nil -} - -// GetNetworkByID returns the network specified by Id. -func GetNetworkByID(networkID string) (*HostComputeNetwork, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": networkID} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(networks) == 0 { - return nil, NetworkNotFoundError{NetworkID: networkID} - } - return &networks[0], err -} - -// GetNetworkByName returns the network specified by Name. -func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"Name": networkName} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - networks, err := ListNetworksQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(networks) == 0 { - return nil, NetworkNotFoundError{NetworkName: networkName} - } - return &networks[0], err -} - -// Create Network. 
-func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) {
- logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id)
- for _, ipam := range network.Ipams {
- for _, subnet := range ipam.Subnets {
- if subnet.IpAddressPrefix != "" {
- hasDefault := false
- for _, route := range subnet.Routes {
- if route.NextHop == "" {
- return nil, errors.New("network create error, subnet has address prefix but no gateway specified")
- }
- if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" {
- hasDefault = true
- }
- }
- if !hasDefault {
- return nil, errors.New("network create error, no default gateway")
- }
- }
- }
- }
-
- jsonString, err := json.Marshal(network)
- if err != nil {
- return nil, err
- }
-
- logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString)
- network, hcnErr := createNetwork(string(jsonString))
- if hcnErr != nil {
- return nil, hcnErr
- }
- return network, nil
-}
-
-// Delete Network.
-func (network *HostComputeNetwork) Delete() error {
- logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id)
-
- if err := deleteNetwork(network.Id); err != nil {
- return err
- }
- return nil
-}
-
-// ModifyNetworkSettings updates the Policy for a network.
-func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error {
- logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id)
-
- networkSettingsRequest, err := json.Marshal(request)
- if err != nil {
- return err
- }
-
- _, err = modifyNetwork(network.Id, string(networkSettingsRequest))
- if err != nil {
- return err
- }
- return nil
-}
-
-// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network.
-func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error {
- logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id)
-
- settingsJson, err := json.Marshal(networkPolicy)
- if err != nil {
- return err
- }
- requestMessage := &ModifyNetworkSettingRequest{
- ResourceType: NetworkResourceTypePolicy,
- RequestType: RequestTypeAdd,
- Settings: settingsJson,
- }
-
- return network.ModifyNetworkSettings(requestMessage)
-}
-
-// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network.
-func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error {
- logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id)
-
- settingsJson, err := json.Marshal(networkPolicy)
- if err != nil {
- return err
- }
- requestMessage := &ModifyNetworkSettingRequest{
- ResourceType: NetworkResourceTypePolicy,
- RequestType: RequestTypeRemove,
- Settings: settingsJson,
- }
-
- return network.ModifyNetworkSettings(requestMessage)
-}
-
-// CreateEndpoint creates an endpoint on the Network.
-func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) {
- isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0
- logrus.Debugf("hcn::HostComputeNetwork::CreateEndpoint, networkId=%s remote=%t", network.Id, isRemote)
-
- endpoint.HostComputeNetwork = network.Id
- endpointSettings, err := json.Marshal(endpoint)
- if err != nil {
- return nil, err
- }
- newEndpoint, err := createEndpoint(network.Id, string(endpointSettings))
- if err != nil {
- return nil, err
- }
- return newEndpoint, nil
-}
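Create enforces that any subnet carrying an address prefix also carries a gateway: every route must name a NextHop, and at least one route must be a default route (0.0.0.0/0 or ::/0). A sketch of a specification that passes this validation, with illustrative addresses:

import "github.com/Microsoft/hcsshim/hcn"

// createNatNetwork builds a NAT network with one static subnet and a default gateway.
func createNatNetwork() (*hcn.HostComputeNetwork, error) {
	network := &hcn.HostComputeNetwork{
		Name: "demo-nat", // illustrative
		Type: hcn.NAT,
		Ipams: []hcn.Ipam{{
			Type: "Static",
			Subnets: []hcn.Subnet{{
				IpAddressPrefix: "192.168.128.0/24",
				Routes: []hcn.Route{{
					NextHop:           "192.168.128.1",
					DestinationPrefix: "0.0.0.0/0", // the default route Create checks for
				}},
			}},
		}},
		SchemaVersion: hcn.V2SchemaVersion(),
	}
	return network.Create()
}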
-
-// CreateRemoteEndpoint creates a remote endpoint on the Network.
-func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) {
- endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags
- return network.CreateEndpoint(endpoint)
-}
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go
deleted file mode 100644
index 29651bb5f14a..000000000000
--- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package hcn
-
-import (
- "encoding/json"
-)
-
-// EndpointPolicyType are the potential Policies that apply to Endpoints.
-type EndpointPolicyType string
-
-// EndpointPolicyType const
-const (
- PortMapping EndpointPolicyType = "PortMapping"
- ACL EndpointPolicyType = "ACL"
- QOS EndpointPolicyType = "QOS"
- L2Driver EndpointPolicyType = "L2Driver"
- OutBoundNAT EndpointPolicyType = "OutBoundNAT"
- SDNRoute EndpointPolicyType = "SDNRoute"
- L4Proxy EndpointPolicyType = "L4Proxy"
- L4WFPPROXY EndpointPolicyType = "L4WFPPROXY"
- PortName EndpointPolicyType = "PortName"
- EncapOverhead EndpointPolicyType = "EncapOverhead"
- IOV EndpointPolicyType = "Iov"
- // Endpoint and Network have InterfaceConstraint and ProviderAddress
- NetworkProviderAddress EndpointPolicyType = "ProviderAddress"
- NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint"
- TierAcl EndpointPolicyType = "TierAcl"
-)
-
-// EndpointPolicy is a collection of Policy settings for an Endpoint.
-type EndpointPolicy struct {
- Type EndpointPolicyType `json:""`
- Settings json.RawMessage `json:",omitempty"`
-}
-
-// NetworkPolicyType are the potential Policies that apply to Networks.
-type NetworkPolicyType string
-
-// NetworkPolicyType const
-const (
- SourceMacAddress NetworkPolicyType = "SourceMacAddress"
- NetAdapterName NetworkPolicyType = "NetAdapterName"
- VSwitchExtension NetworkPolicyType = "VSwitchExtension"
- DrMacAddress NetworkPolicyType = "DrMacAddress"
- AutomaticDNS NetworkPolicyType = "AutomaticDNS"
- InterfaceConstraint NetworkPolicyType = "InterfaceConstraint"
- ProviderAddress NetworkPolicyType = "ProviderAddress"
- RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute"
- VxlanPort NetworkPolicyType = "VxlanPort"
- HostRoute NetworkPolicyType = "HostRoute"
- SetPolicy NetworkPolicyType = "SetPolicy"
- NetworkL4Proxy NetworkPolicyType = "L4Proxy"
- LayerConstraint NetworkPolicyType = "LayerConstraint"
-)
-
-// NetworkPolicy is a collection of Policy settings for a Network.
-type NetworkPolicy struct {
- Type NetworkPolicyType `json:""`
- Settings json.RawMessage `json:",omitempty"`
-}
-
-// SubnetPolicyType are the potential Policies that apply to Subnets.
-type SubnetPolicyType string
-
-// SubnetPolicyType const
-const (
- VLAN SubnetPolicyType = "VLAN"
- VSID SubnetPolicyType = "VSID"
-)
-
-// SubnetPolicy is a collection of Policy settings for a Subnet.
-type SubnetPolicy struct {
- Type SubnetPolicyType `json:""`
- Settings json.RawMessage `json:",omitempty"`
-}
-
-// NatFlags are flags for port mappings.
-type NatFlags uint32 - -const ( - NatFlagsNone NatFlags = iota - NatFlagsLocalRoutedVip - NatFlagsIPv6 -) - -/// Endpoint Policy objects - -// PortMappingPolicySetting defines Port Mapping (NAT) -type PortMappingPolicySetting struct { - Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - VIP string `json:",omitempty"` - Flags NatFlags `json:",omitempty"` -} - -// ActionType associated with ACLs. Value is either Allow or Block. -type ActionType string - -// DirectionType associated with ACLs. Value is either In or Out. -type DirectionType string - -// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP). -type RuleType string - -const ( - // Allow traffic - ActionTypeAllow ActionType = "Allow" - // Block traffic - ActionTypeBlock ActionType = "Block" - // Pass traffic - ActionTypePass ActionType = "Pass" - - // In is traffic coming to the Endpoint - DirectionTypeIn DirectionType = "In" - // Out is traffic leaving the Endpoint - DirectionTypeOut DirectionType = "Out" - - // Host creates WFP (Windows Firewall) rules - RuleTypeHost RuleType = "Host" - // Switch creates VFP (Virtual Filter Platform) rules - RuleTypeSwitch RuleType = "Switch" -) - -// AclPolicySetting creates firewall rules on an endpoint -type AclPolicySetting struct { - Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) - Action ActionType `json:","` - Direction DirectionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - RuleType RuleType `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. -type QosPolicySetting struct { - MaximumOutgoingBandwidthInBytes uint64 -} - -// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. -type OutboundNatPolicySetting struct { - VirtualIP string `json:",omitempty"` - Exceptions []string `json:",omitempty"` - Destinations []string `json:",omitempty"` - Flags NatFlags `json:",omitempty"` -} - -// SDNRoutePolicySetting sets SDN Route on an Endpoint. -type SDNRoutePolicySetting struct { - DestinationPrefix string `json:",omitempty"` - NextHop string `json:",omitempty"` - NeedEncap bool `json:",omitempty"` -} - -// FiveTuple is nested in L4ProxyPolicySetting for WFP support. -type FiveTuple struct { - Protocols string `json:",omitempty"` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - Priority uint16 `json:",omitempty"` -} - -// ProxyExceptions exempts traffic to IpAddresses and Ports -type ProxyExceptions struct { - IpAddressExceptions []string `json:",omitempty"` - PortExceptions []string `json:",omitempty"` -} - -// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint. -type L4WfpProxyPolicySetting struct { - InboundProxyPort string `json:",omitempty"` - OutboundProxyPort string `json:",omitempty"` - FilterTuple FiveTuple `json:",omitempty"` - UserSID string `json:",omitempty"` - InboundExceptions ProxyExceptions `json:",omitempty"` - OutboundExceptions ProxyExceptions `json:",omitempty"` -} - -// PortnameEndpointPolicySetting sets the port name for an endpoint. 
-type PortnameEndpointPolicySetting struct {
- Name string `json:",omitempty"`
-}
-
-// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint.
-type EncapOverheadEndpointPolicySetting struct {
- Overhead uint16 `json:",omitempty"`
-}
-
-// IovPolicySetting sets the Iov settings for an endpoint.
-type IovPolicySetting struct {
- IovOffloadWeight uint32 `json:",omitempty"`
- QueuePairsRequested uint32 `json:",omitempty"`
- InterruptModeration uint32 `json:",omitempty"`
-}
-
-/// Endpoint and Network Policy objects
-
-// ProviderAddressEndpointPolicySetting sets the PA for an endpoint.
-type ProviderAddressEndpointPolicySetting struct {
- ProviderAddress string `json:",omitempty"`
-}
-
-// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic.
-type InterfaceConstraintPolicySetting struct {
- InterfaceGuid string `json:",omitempty"`
- InterfaceLuid uint64 `json:",omitempty"`
- InterfaceIndex uint32 `json:",omitempty"`
- InterfaceMediaType uint32 `json:",omitempty"`
- InterfaceAlias string `json:",omitempty"`
- InterfaceDescription string `json:",omitempty"`
-}
-
-/// Network Policy objects
-
-// SourceMacAddressNetworkPolicySetting sets source MAC for a network.
-type SourceMacAddressNetworkPolicySetting struct {
- SourceMacAddress string `json:",omitempty"`
-}
-
-// NetAdapterNameNetworkPolicySetting sets network adapter of a network.
-type NetAdapterNameNetworkPolicySetting struct {
- NetworkAdapterName string `json:",omitempty"`
-}
-
-// VSwitchExtensionNetworkPolicySetting enables/disables VSwitch extensions for a network.
-type VSwitchExtensionNetworkPolicySetting struct {
- ExtensionID string `json:",omitempty"`
- Enable bool `json:",omitempty"`
-}
-
-// DrMacAddressNetworkPolicySetting sets the DR MAC for a network.
-type DrMacAddressNetworkPolicySetting struct {
- Address string `json:",omitempty"`
-}
-
-// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network.
-type AutomaticDNSNetworkPolicySetting struct {
- Enable bool `json:",omitempty"`
-}
-
-type LayerConstraintNetworkPolicySetting struct {
- LayerId string `json:",omitempty"`
-}
-
-/// Subnet Policy objects
-
-// VlanPolicySetting isolates a subnet with VLAN tagging.
-type VlanPolicySetting struct {
- IsolationId uint32 `json:","`
-}
-
-// VsidPolicySetting isolates a subnet with VSID tagging.
-type VsidPolicySetting struct {
- IsolationId uint32 `json:","`
-}
-
-// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network
-type RemoteSubnetRoutePolicySetting struct {
- DestinationPrefix string
- IsolationId uint16
- ProviderAddress string
- DistributedRouterMacAddress string
-}
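Policy settings like RemoteSubnetRoutePolicySetting above travel as raw JSON inside a NetworkPolicy whose Type names the payload. A sketch applying one through HostComputeNetwork.AddPolicy, with illustrative values:

import (
	"encoding/json"

	"github.com/Microsoft/hcsshim/hcn"
)

// addRemoteSubnetRoute programs a remote-subnet route policy on an existing network.
func addRemoteSubnetRoute(network *hcn.HostComputeNetwork) error {
	setting := hcn.RemoteSubnetRoutePolicySetting{
		DestinationPrefix:           "10.10.0.0/16",      // illustrative
		IsolationId:                 4096,                // illustrative VSID
		ProviderAddress:             "172.16.0.4",        // illustrative
		DistributedRouterMacAddress: "00-11-22-33-44-55", // illustrative
	}
	raw, err := json.Marshal(setting)
	if err != nil {
		return err
	}
	return network.AddPolicy(hcn.PolicyNetworkRequest{
		Policies: []hcn.NetworkPolicy{{
			Type:     hcn.RemoteSubnetRoute,
			Settings: raw,
		}},
	})
}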
-
-// SetPolicyType is associated with SetPolicy. Value is IPSET.
-type SetPolicyType string
-
-const (
- SetPolicyTypeIpSet SetPolicyType = "IPSET"
-)
-
-// SetPolicySetting creates IPSets on network
-type SetPolicySetting struct {
- Id string
- Name string
- Type SetPolicyType
- Values string
-}
-
-// VxlanPortPolicySetting allows configuring the VXLAN TCP port
-type VxlanPortPolicySetting struct {
- Port uint16
-}
-
-// ProtocolType associated with L4ProxyPolicy
-type ProtocolType uint32
-
-const (
- ProtocolTypeUnknown ProtocolType = 0
- ProtocolTypeICMPv4 ProtocolType = 1
- ProtocolTypeIGMP ProtocolType = 2
- ProtocolTypeTCP ProtocolType = 6
- ProtocolTypeUDP ProtocolType = 17
- ProtocolTypeICMPv6 ProtocolType = 58
-)
-
-// L4ProxyPolicySetting applies proxy policy on network/endpoint
-type L4ProxyPolicySetting struct {
- IP string `json:",omitempty"`
- Port string `json:",omitempty"`
- Protocol ProtocolType `json:",omitempty"`
- Exceptions []string `json:",omitempty"`
- Destination string
- OutboundNAT bool `json:",omitempty"`
-}
-
-// TierAclRule represents an ACL within TierAclPolicySetting
-type TierAclRule struct {
- Id string `json:",omitempty"`
- Protocols string `json:",omitempty"`
- TierAclRuleAction ActionType `json:","`
- LocalAddresses string `json:",omitempty"`
- RemoteAddresses string `json:",omitempty"`
- LocalPorts string `json:",omitempty"`
- RemotePorts string `json:",omitempty"`
- Priority uint16 `json:",omitempty"`
-}
-
-// TierAclPolicySetting represents a Tier containing ACLs
-type TierAclPolicySetting struct {
- Name string `json:","`
- Direction DirectionType `json:","`
- Order uint16 `json:""`
- TierAclRules []TierAclRule `json:",omitempty"`
-}
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go
deleted file mode 100644
index d6d27079bcae..000000000000
--- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package hcn
-
-import (
- "encoding/json"
- "errors"
-
- "github.com/Microsoft/go-winio/pkg/guid"
- "github.com/Microsoft/hcsshim/internal/interop"
- "github.com/sirupsen/logrus"
-)
-
-// HostComputeRoute represents SDN routes.
-type HostComputeRoute struct {
- ID string `json:"ID,omitempty"`
- HostComputeEndpoints []string `json:",omitempty"`
- Setting []SDNRoutePolicySetting `json:",omitempty"`
- SchemaVersion SchemaVersion `json:",omitempty"`
-}
-
-// ListRoutes makes a call to list all available routes.
-func ListRoutes() ([]HostComputeRoute, error) {
- hcnQuery := defaultQuery()
- routes, err := ListRoutesQuery(hcnQuery)
- if err != nil {
- return nil, err
- }
- return routes, nil
-}
-
-// ListRoutesQuery makes a call to query the list of available routes.
-func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) {
- queryJSON, err := json.Marshal(query)
- if err != nil {
- return nil, err
- }
-
- routes, err := enumerateRoutes(string(queryJSON))
- if err != nil {
- return nil, err
- }
- return routes, nil
-}
-
-// GetRouteByID returns the route specified by Id.
-func GetRouteByID(routeID string) (*HostComputeRoute, error) { - hcnQuery := defaultQuery() - mapA := map[string]string{"ID": routeID} - filter, err := json.Marshal(mapA) - if err != nil { - return nil, err - } - hcnQuery.Filter = string(filter) - - routes, err := ListRoutesQuery(hcnQuery) - if err != nil { - return nil, err - } - if len(routes) == 0 { - return nil, RouteNotFoundError{RouteId: routeID} - } - return &routes[0], err -} - -// Create Route. -func (route *HostComputeRoute) Create() (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID) - - jsonString, err := json.Marshal(route) - if err != nil { - return nil, err - } - - logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString) - route, hcnErr := createRoute(string(jsonString)) - if hcnErr != nil { - return nil, hcnErr - } - return route, nil -} - -// Delete Route. -func (route *HostComputeRoute) Delete() error { - logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID) - - existingRoute, _ := GetRouteByID(route.ID) - - if existingRoute != nil { - if err := deleteRoute(route.ID); err != nil { - return err - } - } - - return nil -} - -// AddEndpoint add an endpoint to a route -// Since HCNRoute doesn't implement modify functionality, add operation is essentially delete and add -func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) - - err := route.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) - - return route.Create() -} - -// RemoveEndpoint removes an endpoint from a route -// Since HCNRoute doesn't implement modify functionality, remove operation is essentially delete and add -func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) - - err := route.Delete() - if err != nil { - return nil, err - } - - // Create a list of all the endpoints besides the one being removed - i := 0 - for index, endpointReference := range route.HostComputeEndpoints { - if endpointReference == endpoint.Id { - i = index - break - } - } - - route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...) 
- return route.Create() -} - -// AddRoute for the specified endpoints and SDN Route setting -func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) { - logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) - - if len(endpoints) <= 0 { - return nil, errors.New("Missing endpoints") - } - - route := &HostComputeRoute{ - SchemaVersion: V2SchemaVersion(), - Setting: []SDNRoutePolicySetting{ - { - DestinationPrefix: destinationPrefix, - NextHop: nextHop, - NeedEncap: needEncapsulation, - }, - }, - } - - for _, endpoint := range endpoints { - route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) - } - - return route.Create() -} - -func enumerateRoutes(query string) ([]HostComputeRoute, error) { - // Enumerate all routes Guids - var ( - resultBuffer *uint16 - routeBuffer *uint16 - ) - hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer) - if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil { - return nil, err - } - - routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer) - var routeIds []guid.GUID - if err := json.Unmarshal([]byte(routes), &routeIds); err != nil { - return nil, err - } - - var outputRoutes []HostComputeRoute - for _, routeGUID := range routeIds { - route, err := getRoute(routeGUID, query) - if err != nil { - return nil, err - } - outputRoutes = append(outputRoutes, *route) - } - return outputRoutes, nil -} - -func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) { - // Open routes. - var ( - routeHandle hcnRoute - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer) - if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil { - return nil, err - } - // Query routes. - hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close routes. - hr = hcnCloseRoute(routeHandle) - if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { - return nil, err - } - // Convert output to HostComputeRoute - var outputRoute HostComputeRoute - if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { - return nil, err - } - return &outputRoute, nil -} - -func createRoute(settings string) (*HostComputeRoute, error) { - // Create new route. - var ( - routeHandle hcnRoute - resultBuffer *uint16 - propertiesBuffer *uint16 - ) - routeGUID := guid.GUID{} - hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer) - if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil { - return nil, err - } - // Query route. - hcnQuery := defaultQuery() - query, err := json.Marshal(hcnQuery) - if err != nil { - return nil, err - } - hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer) - if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { - return nil, err - } - properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) - // Close Route. 
- hr = hcnCloseRoute(routeHandle)
- if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil {
- return nil, err
- }
- // Convert output to HostComputeRoute
- var outputRoute HostComputeRoute
- if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil {
- return nil, err
- }
- return &outputRoute, nil
-}
-
-func deleteRoute(routeID string) error {
- routeGUID, err := guid.FromString(routeID)
- if err != nil {
- return errInvalidRouteID
- }
- var resultBuffer *uint16
- hr := hcnDeleteRoute(&routeGUID, &resultBuffer)
- if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil {
- return err
- }
- return nil
-}
diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go
deleted file mode 100644
index 64f9e3728b51..000000000000
--- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package hcn
-
-import (
- "fmt"
- "sync"
-
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-var (
- // featuresOnce ensures the supported features are queried and logged only once,
- // avoiding unnecessary repeated work.
- featuresOnce sync.Once
- featuresErr error
- supportedFeatures SupportedFeatures
-)
-
-// SupportedFeatures are the features provided by the Service.
-type SupportedFeatures struct {
- Acl AclFeatures `json:"ACL"`
- Api ApiSupport `json:"API"`
- RemoteSubnet bool `json:"RemoteSubnet"`
- HostRoute bool `json:"HostRoute"`
- DSR bool `json:"DSR"`
- Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"`
- AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"`
- SessionAffinity bool `json:"SessionAffinity"`
- IPv6DualStack bool `json:"IPv6DualStack"`
- SetPolicy bool `json:"SetPolicy"`
- VxlanPort bool `json:"VxlanPort"`
- L4Proxy bool `json:"L4Proxy"` // network policy that applies VFP rules to all endpoints on the network to redirect traffic
- L4WfpProxy bool `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint
- TierAcl bool `json:"TierAcl"`
-}
-
-// AclFeatures are the supported ACL possibilities.
-type AclFeatures struct {
- AclAddressLists bool `json:"AclAddressLists"`
- AclNoHostRulePriority bool `json:"AclHostRulePriority"`
- AclPortRanges bool `json:"AclPortRanges"`
- AclRuleId bool `json:"AclRuleId"`
-}
-
-// ApiSupport lists the supported API versions.
-type ApiSupport struct {
- V1 bool `json:"V1"`
- V2 bool `json:"V2"`
-}
-
-// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called
-// before, it will return the supported features and error received from the first call. This can be used to optimize if many calls to the
-// various hcn.IsXSupported methods need to be made.
-func GetCachedSupportedFeatures() (SupportedFeatures, error) {
- // Only query the HCN version and features supported once, instead of every time this is invoked. The logs are useful to
- // debug incidents where there's confusion about whether a feature is supported on the host machine. The sync.Once helps to avoid redundant
- // spam of these any time a check needs to be made for whether an HCN feature is supported. This is a common occurrence in kube-proxy
- // for example.
- featuresOnce.Do(func() { - supportedFeatures, featuresErr = getSupportedFeatures() - }) - - return supportedFeatures, featuresErr -} - -// GetSupportedFeatures returns the features supported by the Service. -// -// Deprecated: Use GetCachedSupportedFeatures instead. -func GetSupportedFeatures() SupportedFeatures { - features, err := GetCachedSupportedFeatures() - if err != nil { - // Expected on pre-1803 builds, all features will be false/unsupported - logrus.WithError(err).Errorf("unable to obtain supported features") - return features - } - return features -} - -func getSupportedFeatures() (SupportedFeatures, error) { - var features SupportedFeatures - globals, err := GetGlobals() - if err != nil { - // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. - return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") - } - features.Acl = AclFeatures{ - AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), - AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), - AclPortRanges: isFeatureSupported(globals.Version, HNSVersion1803), - AclRuleId: isFeatureSupported(globals.Version, HNSVersion1803), - } - - features.Api = ApiSupport{ - V2: isFeatureSupported(globals.Version, V2ApiSupport), - V1: true, // HNSCall is still available. - } - - features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) - features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) - features.DSR = isFeatureSupported(globals.Version, DSRVersion) - features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) - features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) - features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) - features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion) - features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion) - features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion) - features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion) - features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion) - features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion) - - logrus.WithFields(logrus.Fields{ - "version": fmt.Sprintf("%+v", globals.Version), - "supportedFeatures": fmt.Sprintf("%+v", features), - }).Info("HCN feature check") - - return features, nil -} - -func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { - isFeatureSupported := false - - for _, versionRange := range versionsSupported { - isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange) - } - - return isFeatureSupported -} - -func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool { - if currentVersion.Major < versionRange.MinVersion.Major { - return false - } - if currentVersion.Major > versionRange.MaxVersion.Major { - return false - } - if currentVersion.Major == versionRange.MinVersion.Major && currentVersion.Minor < versionRange.MinVersion.Minor { - return false - } - if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > versionRange.MaxVersion.Minor { - return false - } - return true -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go 
b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go deleted file mode 100644 index 7ec5b58b66a8..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go +++ /dev/null @@ -1,795 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package hcn - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") - - procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") - procHNSCall = modvmcompute.NewProc("HNSCall") - procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") - procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") - procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") - procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") - procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") - procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") - procHcnCloseNetwork = modcomputenetwork.NewProc("HcnCloseNetwork") - procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") - procHcnCreateEndpoint = modcomputenetwork.NewProc("HcnCreateEndpoint") - procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") - procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") - procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") - procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") - procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") - procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") - procHcnCreateNamespace = modcomputenetwork.NewProc("HcnCreateNamespace") - procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") - procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") - procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") - procHcnDeleteNamespace = modcomputenetwork.NewProc("HcnDeleteNamespace") - procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") - procHcnEnumerateLoadBalancers = modcomputenetwork.NewProc("HcnEnumerateLoadBalancers") - procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") - procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") - procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") - procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") - procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") - procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") - procHcnEnumerateSdnRoutes = 
modcomputenetwork.NewProc("HcnEnumerateSdnRoutes") - procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") - procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") - procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute") - procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties") - procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute") - procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") -) - -func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { - r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func _hnsCall(method string, path string, object string, response **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(method) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(object) - if hr != nil { - return - } - return __hnsCall(_p0, _p1, _p2, response) -} - -func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateNetworks(_p0, networks, result) -} - -func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateNetworks.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateNetwork(id, _p0, network, result) -} - -func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { - if hr = procHcnCreateNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { - if hr = procHcnOpenNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func 
hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyNetwork(network, _p0, result) -} - -func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryNetworkProperties(network, _p0, properties, result) -} - -func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryNetworkProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseNetwork(network hcnNetwork) (hr error) { - if hr = procHcnCloseNetwork.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateEndpoints(_p0, endpoints, result) -} - -func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateEndpoints.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateEndpoint(network, id, _p0, endpoint, result) -} - -func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { - if hr = procHcnCreateEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) - if 
int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { - if hr = procHcnOpenEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyEndpoint(endpoint, _p0, result) -} - -func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) -} - -func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryEndpointProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { - if hr = procHcnCloseEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateNamespaces(_p0, namespaces, result) -} - -func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateNamespaces.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { - var _p0 *uint16 - 
_p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateNamespace(id, _p0, namespace, result) -} - -func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { - if hr = procHcnCreateNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) { - if hr = procHcnOpenNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyNamespace(namespace, _p0, result) -} - -func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) -} - -func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryNamespaceProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseNamespace(namespace hcnNamespace) (hr error) { - if hr = procHcnCloseNamespace.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) -} - -func 
_hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) -} - -func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - if hr = procHcnCreateLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - if hr = procHcnOpenLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyLoadBalancer(loadBalancer, _p0, result) -} - -func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) -} - -func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if 
int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { - if hr = procHcnCloseLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnEnumerateRoutes(_p0, routes, result) -} - -func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnCreateRoute(id, _p0, route, result) -} - -func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { - if hr = procHcnCreateSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { - if hr = procHcnOpenSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcnModifyRoute(route, _p0, result) -} - -func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifySdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcnQueryRouteProperties(route, _p0, properties, result) -} - -func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), 
uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcnCloseRoute(route hcnRoute) (hr error) { - if hr = procHcnCloseSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go deleted file mode 100644 index 4a4fcea843f1..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go +++ /dev/null @@ -1,110 +0,0 @@ -package cni - -import ( - "errors" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/regstate" -) - -const ( - cniRoot = "cni" - cniKey = "cfg" -) - -// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM -// map. -type PersistedNamespaceConfig struct { - namespaceID string - stored bool - - ContainerID string - HostUniqueID guid.GUID -} - -// NewPersistedNamespaceConfig creates an in-memory namespace config that can be -// persisted to the registry. -func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig { - return &PersistedNamespaceConfig{ - namespaceID: namespaceID, - ContainerID: containerID, - HostUniqueID: containerHostUniqueID, - } -} - -// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches -// `namespaceID`. If not found, it returns `regstate.NotFoundError`. -func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) { - sk, err := regstate.Open(cniRoot, false) - if err != nil { - return nil, err - } - defer sk.Close() - - pnc := PersistedNamespaceConfig{ - namespaceID: namespaceID, - stored: true, - } - if err := sk.Get(namespaceID, cniKey, &pnc); err != nil { - return nil, err - } - return &pnc, nil -} - -// Store stores or updates the in-memory config to its registry state. If the -// store fails, it returns the store error. -func (pnc *PersistedNamespaceConfig) Store() error { - if pnc.namespaceID == "" { - return errors.New("invalid namespaceID ''") - } - if pnc.ContainerID == "" { - return errors.New("invalid containerID ''") - } - empty := guid.GUID{} - if pnc.HostUniqueID == empty { - return errors.New("invalid containerHostUniqueID 'empty'") - } - sk, err := regstate.Open(cniRoot, false) - if err != nil { - return err - } - defer sk.Close() - - if pnc.stored { - if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil { - return err - } - } else { - if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil { - return err - } - } - pnc.stored = true - return nil -} - -// Remove removes any persisted state associated with this config. If the config -// is not found in the registry, `Remove` returns no error. 
-func (pnc *PersistedNamespaceConfig) Remove() error { - if pnc.stored { - sk, err := regstate.Open(cniRoot, false) - if err != nil { - if regstate.IsNotFoundError(err) { - pnc.stored = false - return nil - } - return err - } - defer sk.Close() - - if err := sk.Remove(pnc.namespaceID); err != nil { - if regstate.IsNotFoundError(err) { - pnc.stored = false - return nil - } - return err - } - } - pnc.stored = false - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go deleted file mode 100644 index 6086c1dc523f..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go +++ /dev/null @@ -1,288 +0,0 @@ -package regstate - -import ( - "encoding/json" - "fmt" - "net/url" - "os" - "path/filepath" - "reflect" - "syscall" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" -) - -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go - -//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW - -const ( - _REG_OPTION_VOLATILE = 1 - - _REG_OPENED_EXISTING_KEY = 2 -) - -type Key struct { - registry.Key - Name string -} - -var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"} -var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} - -var rootPath = `SOFTWARE\Microsoft\runhcs` - -type NotFoundError struct { - Id string -} - -func (err *NotFoundError) Error() string { - return fmt.Sprintf("ID '%s' was not found", err.Id) -} - -func IsNotFoundError(err error) bool { - _, ok := err.(*NotFoundError) - return ok -} - -type NoStateError struct { - ID string - Key string -} - -func (err *NoStateError) Error() string { - return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID) -} - -func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) { - var ( - h syscall.Handle - d uint32 - ) - fullpath := filepath.Join(k.Name, path) - pathPtr, _ := windows.UTF16PtrFromString(path) - err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) - if err != nil { - return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} - } - return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil -} - -func hive(perUser bool) *Key { - r := localMachine - if perUser { - r = localUser - } - return r -} - -func Open(root string, perUser bool) (*Key, error) { - k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS) - if err != nil { - return nil, err - } - defer k.Close() - - k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS) - if err != nil { - return nil, err - } - return k2, nil -} - -func RemoveAll(root string, perUser bool) error { - k, err := hive(perUser).open(rootPath) - if err != nil { - return err - } - defer k.Close() - r, err := k.open(url.PathEscape(root)) - if err != nil { - return err - } - defer r.Close() - ids, err := r.Enumerate() - if err != nil { - return err - } - for _, id := range ids { - err = r.Remove(id) - if err != nil { - return err - } - } - r.Close() - return k.Remove(root) -} - -func (k *Key) Close() error { - err := k.Key.Close() - 
k.Key = 0 - return err -} - -func (k *Key) Enumerate() ([]string, error) { - escapedIDs, err := k.ReadSubKeyNames(0) - if err != nil { - return nil, err - } - var ids []string - for _, e := range escapedIDs { - id, err := url.PathUnescape(e) - if err == nil { - ids = append(ids, id) - } - } - return ids, nil -} - -func (k *Key) open(name string) (*Key, error) { - fullpath := filepath.Join(k.Name, name) - nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS) - if err != nil { - return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} - } - return &Key{nk, fullpath}, nil -} - -func (k *Key) openid(id string) (*Key, error) { - escaped := url.PathEscape(id) - fullpath := filepath.Join(k.Name, escaped) - nk, err := k.open(escaped) - if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { - return nil, &NotFoundError{id} - } - if err != nil { - return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} - } - return nk, nil -} - -func (k *Key) Remove(id string) error { - escaped := url.PathEscape(id) - err := registry.DeleteKey(k.Key, escaped) - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NotFoundError{id} - } - return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err} - } - return nil -} - -func (k *Key) set(id string, create bool, key string, state interface{}) error { - var sk *Key - var err error - if create { - var existing bool - eid := url.PathEscape(id) - sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS) - if err != nil { - return err - } - defer sk.Close() - if existing { - sk.Close() - return fmt.Errorf("container %s already exists", id) - } - } else { - sk, err = k.openid(id) - if err != nil { - return err - } - defer sk.Close() - } - switch reflect.TypeOf(state).Kind() { - case reflect.Bool: - v := uint32(0) - if state.(bool) { - v = 1 - } - err = sk.SetDWordValue(key, v) - case reflect.Int: - err = sk.SetQWordValue(key, uint64(state.(int))) - case reflect.String: - err = sk.SetStringValue(key, state.(string)) - default: - var js []byte - js, err = json.Marshal(state) - if err != nil { - return err - } - err = sk.SetBinaryValue(key, js) - } - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err} - } - return nil -} - -func (k *Key) Create(id, key string, state interface{}) error { - return k.set(id, true, key, state) -} - -func (k *Key) Set(id, key string, state interface{}) error { - return k.set(id, false, key, state) -} - -func (k *Key) Clear(id, key string) error { - sk, err := k.openid(id) - if err != nil { - return err - } - defer sk.Close() - err = sk.DeleteValue(key) - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err} - } - return nil -} - -func (k *Key) Get(id, key string, state interface{}) error { - sk, err := k.openid(id) - if err != nil { - return err - } - defer sk.Close() - - var js []byte - switch reflect.TypeOf(state).Elem().Kind() { - case reflect.Bool: - var v uint64 - v, _, err = sk.GetIntegerValue(key) - if err == nil { - *state.(*bool) = v != 0 - } - case reflect.Int: - var v uint64 - v, _, err = sk.GetIntegerValue(key) - if err == nil { - *state.(*int) = int(v) - } - case reflect.String: - var v string - v, _, err = sk.GetStringValue(key) - if err == nil { - *state.(*string) = string(v) 
- } - default: - js, _, err = sk.GetBinaryValue(key) - } - if err != nil { - if err == syscall.ERROR_FILE_NOT_FOUND { - return &NoStateError{id, key} - } - return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err} - } - if js != nil { - err = json.Unmarshal(js, state) - } - return err -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go deleted file mode 100644 index 4e349ad49849..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package regstate - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") -) - -func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go deleted file mode 100644 index a161c204e2fa..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go +++ /dev/null @@ -1,71 +0,0 @@ -package runhcs - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "syscall" - "time" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -// ContainerState represents the platform agnostic pieces relating to a -// running container's status and state -type ContainerState struct { - // Version is the OCI version for the container - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // InitProcessPid is the init process id in the parent namespace - InitProcessPid int `json:"pid"` - // Status is the current status of the container, running, paused, ... - Status string `json:"status"` - // Bundle is the path on the filesystem to the bundle - Bundle string `json:"bundle"` - // Rootfs is a path to a directory containing the container's root filesystem. 
- Rootfs string `json:"rootfs"` - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` - // Annotations is the user defined annotations added to the config. - Annotations map[string]string `json:"annotations,omitempty"` - // The owner of the state directory (the owner of the container). - Owner string `json:"owner"` -} - -// GetErrorFromPipe reads from `pipe` and verifies whether the operation -// returned success or an error. On error, it converts the message to an error and returns it. If -// `p` is not nil, it will issue a `Kill` and `Wait` for exit. -func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { - serr, err := ioutil.ReadAll(pipe) - if err != nil { - return err - } - - if bytes.Equal(serr, ShimSuccess) { - return nil - } - - extra := "" - if p != nil { - _ = p.Kill() - state, err := p.Wait() - if err != nil { - panic(err) - } - extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode) - } - if len(serr) == 0 { - return fmt.Errorf("unknown shim failure%s", extra) - } - - return errors.New(string(serr)) -} - -// VMPipePath returns the named pipe path for the vm shim. -func VMPipePath(hostUniqueID guid.GUID) string { - return SafePipePath("runhcs-vm-" + hostUniqueID.String()) -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go deleted file mode 100644 index dcbb1903b8f0..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go +++ /dev/null @@ -1,16 +0,0 @@ -package runhcs - -import "net/url" - -const ( - SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\` -) - -// ShimSuccess is the byte stream returned on a successful operation. -var ShimSuccess = []byte{0, 'O', 'K', 0} - -func SafePipePath(name string) string { - // Use a pipe in the Administrators protected prefix to prevent malicious - // squatting. - return SafePipePrefix + url.PathEscape(name) -} diff --git a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go deleted file mode 100644 index 2c8957b88df7..000000000000 --- a/cluster-autoscaler/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go +++ /dev/null @@ -1,43 +0,0 @@ -package runhcs - -import ( - "encoding/json" - - "github.com/Microsoft/go-winio" -) - -// VMRequestOp is an operation that can be issued to a VM shim. -type VMRequestOp string - -const ( - // OpCreateContainer is a create container request. - OpCreateContainer VMRequestOp = "create" - // OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM. - OpSyncNamespace VMRequestOp = "sync" - // OpUnmountContainer is a container unmount request. - OpUnmountContainer VMRequestOp = "unmount" - // OpUnmountContainerDiskOnly is a container unmount disk request. - OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk" -) - -// VMRequest is an operation request that is issued to a VM shim. -type VMRequest struct { - ID string - Op VMRequestOp -} - -// IssueVMRequest issues a request to a shim at the given pipe. 
-func IssueVMRequest(pipepath string, req *VMRequest) error { - pipe, err := winio.DialPipe(pipepath, nil) - if err != nil { - return err - } - defer pipe.Close() - if err := json.NewEncoder(pipe).Encode(req); err != nil { - return err - } - if err := GetErrorFromPipe(pipe, nil); err != nil { - return err - } - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ARCHITECTURE.md index 6cbb31b6481e..8cd7e2486e70 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/ARCHITECTURE.md +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/ARCHITECTURE.md @@ -78,3 +78,9 @@ tend to use bpf_link to do so. Older hooks unfortunately use a combination of syscalls, netlink messages, etc. Adding support for a new link type should not pull in large dependencies like netlink, so XDP programs or tracepoints are out of scope. + +Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds +to BPF_LINK_TRACING. In general, these types should be unexported as long as they +don't export methods outside of the Link interface. Each Go type may have multiple +exported constructors. For example `AttachTracing` and `AttachLSM` create a +tracing link, but are distinct functions since they may require different arguments. diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/cluster-autoscaler/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 000000000000..9c18e7e76f54 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,8 @@ +# Maintainers + + * [Lorenz Bauer] + * [Timo Beckers] (Isovalent) + + +[Lorenz Bauer]: https://github.com/lmb +[Timo Beckers]: https://github.com/ti-mo diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/Makefile b/cluster-autoscaler/vendor/github.com/cilium/ebpf/Makefile index 0bc15c0810cb..2d5f04c370eb 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/Makefile +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/Makefile @@ -1,17 +1,27 @@ # The development version of clang is distributed as the 'clang' binary, # while stable/released versions have a version number attached. # Pin the default clang to a stable version. -CLANG ?= clang-12 -CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS) +CLANG ?= clang-14 +STRIP ?= llvm-strip-14 +OBJCOPY ?= llvm-objcopy-14 +CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ # Obtain an absolute path to the directory of the Makefile. # Assume the Makefile is in the root of the repository. REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. 
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") + IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) + # clang <8 doesn't tag relocs properly (STT_NOTYPE) # clang 9 is the first version emitting BTF TARGETS := \ @@ -26,48 +36,75 @@ TARGETS := \ testdata/strings \ testdata/freplace \ testdata/iproute2_map_compat \ - internal/btf/testdata/relocs + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt -.PHONY: all clean docker-all docker-shell +.PHONY: all clean container-all container-shell generate -.DEFAULT_TARGET = docker-all +.DEFAULT_TARGET = container-all -# Build all ELF binaries using a Dockerized LLVM toolchain. -docker-all: - docker run --rm --user "${UIDGID}" \ +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + ${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ --env CFLAGS="-fdebug-prefix-map=/ebpf=." \ + --env HOME="/tmp" \ "${IMAGE}:${VERSION}" \ - make all + $(MAKE) all -# (debug) Drop the user into a shell inside the Docker container as root. -docker-shell: - docker run --rm -ti \ +# (debug) Drop the user into a shell inside the container as root. +container-shell: + ${CONTAINER_ENGINE} run --rm -ti \ -v "${REPODIR}":/ebpf -w /ebpf \ "${IMAGE}:${VERSION}" clean: -$(RM) testdata/*.elf - -$(RM) internal/btf/testdata/*.elf + -$(RM) btf/testdata/*.elf -all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) +format: + find . -type f -name "*.c" | xargs clang-format -i + +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf +# $BPF_CLANG is used in go:generate invocations. +generate: export BPF_CLANG := $(CLANG) +generate: export BPF_CFLAGS := $(CFLAGS) +generate: + go generate ./cmd/bpf2go/test + go generate ./internal/sys + cd examples/ && go generate ./... 
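The `generate` recipe above exports `BPF_CLANG` and `BPF_CFLAGS` so that `go:generate` directives in the tree can consume them (`go generate` expands environment variables in its commands). As a rough sketch of such a consumer, assuming a hypothetical package and C source file (the `-cc`/`-cflags` flags follow cilium/ebpf's documented bpf2go usage; `prog.c` and the `bpf` stem are placeholders, not part of this diff):

```go
package bpf

// The Makefile's generate target exports BPF_CLANG and BPF_CFLAGS;
// go generate substitutes both into the directive below before running it.
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS bpf prog.c
```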
+ testdata/loader-%-el.elf: testdata/loader.c - $* $(CFLAGS) -mlittle-endian -c $< -o $@ + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ testdata/loader-%-eb.elf: testdata/loader.c - $* $(CFLAGS) -mbig-endian -c $< -o $@ + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ %-el.elf: %.c - $(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@ + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ %-eb.elf : %.c - $(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@ + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ -# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf -.PHONY: vmlinux-btf -vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz -internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX) - objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@" +.PHONY: generate-btf +generate-btf: KERNEL_VERSION?=5.18 +generate-btf: + $(eval TMP := $(shell mktemp -d)) + curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION).bz" -o "$(TMP)/bzImage" + ./testdata/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux" + $(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz" + curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-selftests-bpf.tgz" -o "$(TMP)/selftests.tgz" + tar -xf "$(TMP)/selftests.tgz" --to-stdout tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko | \ + $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" - /dev/null + $(RM) -r "$(TMP)" diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/README.md b/cluster-autoscaler/vendor/github.com/cilium/ebpf/README.md index 01e2fff92bbc..3e490de71101 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/README.md +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/README.md @@ -45,13 +45,17 @@ This library includes the following packages: `PERF_EVENT_ARRAY` * [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a `BPF_MAP_TYPE_RINGBUF` map - +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. +* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. ## Requirements * A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy) -* Linux >= 4.9. CI is run against LTS releases. +* Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is + not tested against. ## Regenerating Testdata @@ -59,6 +63,9 @@ Run `make` in the root of this repository to rebuild testdata in all subpackages. This requires Docker, as it relies on a standardized build environment to keep the build output stable. +It is possible to regenerate data using Podman by overriding the `CONTAINER_*` +variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`. + The toolchain image build files are kept in [testdata/docker/](testdata/docker/). ## License diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go index bfa5d59c976e..a14e9e2c3ce9 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func.go @@ -5,6 +5,10 @@ package asm // BuiltinFunc is a built-in eBPF function. 
type BuiltinFunc int32 +func (_ BuiltinFunc) Max() BuiltinFunc { + return maxBuiltinFunc - 1 +} + // eBPF built-in functions // // You can regenerate this list using the following gawk script: @@ -190,6 +194,43 @@ const ( FnSysBpf FnBtfFindByNameKind FnSysClose + FnTimerInit + FnTimerSetCallback + FnTimerStart + FnTimerCancel + FnGetFuncIp + FnGetAttachCookie + FnTaskPtRegs + FnGetBranchSnapshot + FnTraceVprintk + FnSkcToUnixSock + FnKallsymsLookupName + FnFindVma + FnLoop + FnStrncmp + FnGetFuncArg + FnGetFuncRet + FnGetFuncArgCnt + FnGetRetval + FnSetRetval + FnXdpGetBuffLen + FnXdpLoadBytes + FnXdpStoreBytes + FnCopyFromUserTask + FnSkbSetTstamp + FnImaFileHash + FnKptrXchg + FnMapLookupPercpuElem + FnSkcToMptcpSock + FnDynptrFromMem + FnRingbufReserveDynptr + FnRingbufSubmitDynptr + FnRingbufDiscardDynptr + FnDynptrRead + FnDynptrWrite + FnDynptrData + + maxBuiltinFunc ) // Call emits a function call. diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go index 5a0e333639a3..b7431b7f605a 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -177,11 +177,47 @@ func _() { _ = x[FnSysBpf-166] _ = x[FnBtfFindByNameKind-167] _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[maxBuiltinFunc-204] } -const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysClose" +const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDatamaxBuiltinFunc" -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 
1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497} +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3011} func (i BuiltinFunc) String() string { if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go index 64d717d156d5..f17d88b51869 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -8,8 +8,10 @@ import ( "fmt" "io" "math" + "sort" "strings" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -19,6 +21,10 @@ const InstructionSize = 8 // RawInstructionOffset is an offset in units of raw BPF instructions. type RawInstructionOffset uint64 +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + // Bytes returns the offset of an instruction in bytes. func (rio RawInstructionOffset) Bytes() uint64 { return uint64(rio) * InstructionSize @@ -26,50 +32,57 @@ func (rio RawInstructionOffset) Bytes() uint64 { // Instruction is a single eBPF instruction. type Instruction struct { - OpCode OpCode - Dst Register - Src Register - Offset int16 - Constant int64 - Reference string - Symbol string -} - -// Sym creates a symbol. -func (ins Instruction) Sym(name string) Instruction { - ins.Symbol = name - return ins + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Metadata contains optional metadata about this instruction. + Metadata Metadata } // Unmarshal decodes a BPF instruction. 
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { - var bi bpfInstruction - err := binary.Read(r, bo, &bi) - if err != nil { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { return 0, err } - ins.OpCode = bi.OpCode - ins.Offset = bi.Offset - ins.Constant = int64(bi.Constant) - ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo) - if err != nil { - return 0, fmt.Errorf("can't unmarshal registers: %s", err) + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) } - if !bi.OpCode.IsDWordLoad() { + ins.Offset = int16(bo.Uint16(data[2:4])) + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. + ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if !ins.OpCode.IsDWordLoad() { return InstructionSize, nil } - var bi2 bpfInstruction - if err := binary.Read(r, bo, &bi2); err != nil { + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { // No Wrap, to avoid io.EOF clash return 0, errors.New("64bit immediate is missing second half") } - if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { + + // Require that all fields other than the value are zero. + if bo.Uint32(data[0:4]) != 0 { return 0, errors.New("64bit immediate has non-zero fields") } - ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) return 2 * InstructionSize, nil } @@ -93,14 +106,12 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("can't marshal registers: %s", err) } - bpfi := bpfInstruction{ - ins.OpCode, - regs, - ins.Offset, - cons, - } - - if err := binary.Write(w, bo, &bpfi); err != nil { + data := make([]byte, InstructionSize) + data[0] = byte(ins.OpCode) + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { return 0, err } @@ -108,42 +119,76 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return InstructionSize, nil } - bpfi = bpfInstruction{ - Constant: int32(ins.Constant >> 32), - } - - if err := binary.Write(w, bo, &bpfi); err != nil { + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. + bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { return 0, err } return 2 * InstructionSize, nil } +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. +// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + // RewriteMapPtr changes an instruction to use a new map fd. // // Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. 
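For illustration, a minimal round-trip sketch of the wire format handled above (the helper function and the constant are assumptions, not part of the vendored change): a 64-bit immediate load spans two 8-byte slots, so Marshal emits 16 bytes and Unmarshal consumes both halves and reassembles the constant.

func exampleDWordRoundTrip() (Instruction, error) {
	// Marshal a dword load; strings.Builder doubles as an io.Writer here.
	var buf strings.Builder
	if _, err := LoadImm(R0, 0x1122334455667788, DWord).Marshal(&buf, binary.LittleEndian); err != nil {
		return Instruction{}, err
	}

	// Unmarshal reads 2 * InstructionSize bytes and restores the constant.
	var ins Instruction
	if _, err := ins.Unmarshal(strings.NewReader(buf.String()), binary.LittleEndian); err != nil {
		return Instruction{}, err
	}
	return ins, nil
}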
func (ins *Instruction) RewriteMapPtr(fd int) error { - if !ins.OpCode.IsDWordLoad() { - return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) - } - - if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue { + if !ins.IsLoadFromMap() { return errors.New("not a load from a map") } + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { // Preserve the offset value for direct map loads. offset := uint64(ins.Constant) & (math.MaxUint32 << 32) rawFd := uint64(uint32(fd)) ins.Constant = int64(offset | rawFd) - return nil } // MapPtr returns the map fd for this instruction. // // The result is undefined if the instruction is not a load from a map, // see IsLoadFromMap. +// +// Deprecated: use Map() instead. func (ins *Instruction) MapPtr() int { - return int(int32(uint64(ins.Constant) & math.MaxUint32)) + // If there is a map associated with the instruction, return its FD. + if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) } // RewriteMapOffset changes the offset of a direct load from a map. @@ -181,6 +226,18 @@ func (ins *Instruction) IsFunctionCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall } +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + // IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. 
func (ins *Instruction) IsBuiltinCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 @@ -213,21 +270,30 @@ func (ins Instruction) Format(f fmt.State, c rune) { } if ins.IsLoadFromMap() { - fd := ins.MapPtr() + fd := ins.mapFd() + m := ins.Map() switch ins.Src { case PseudoMapFD: - fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } case PseudoMapValue: - fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } } goto ref } fmt.Fprintf(f, "%v ", op) - switch cls := op.Class(); cls { - case LdClass, LdXClass, StClass, StXClass: + switch cls := op.Class(); { + case cls.isLoadOrStore(): switch op.Mode() { case ImmMode: fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) @@ -241,7 +307,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) } - case ALU64Class, ALUClass: + case cls.IsALU(): fmt.Fprintf(f, "dst: %s ", ins.Dst) if op.ALUOp() == Swap || op.Source() == ImmSource { fmt.Fprintf(f, "imm: %d", ins.Constant) @@ -249,7 +315,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "src: %s", ins.Src) } - case JumpClass: + case cls.IsJump(): switch jop := op.JumpOp(); jop { case Call: if ins.Src == PseudoCall { @@ -270,42 +336,212 @@ func (ins Instruction) Format(f fmt.State, c rune) { } ref: - if ins.Reference != "" { - fmt.Fprintf(f, " <%s>", ins.Reference) + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) } } +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. +func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. 
it references an existing,
+// pinned map that was opened during ELF loading.
+func (ins Instruction) Map() FDer {
+	fd, _ := ins.Metadata.Get(mapMeta{}).(FDer)
+	return fd
+}
+
+type sourceMeta struct{}
+
+// WithSource adds source information about the Instruction.
+func (ins Instruction) WithSource(src fmt.Stringer) Instruction {
+	ins.Metadata.Set(sourceMeta{}, src)
+	return ins
+}
+
+// Source returns source information about the Instruction. The field is
+// present when the compiler emits BTF line info about the Instruction and
+// usually contains the line of source code responsible for it.
+func (ins Instruction) Source() fmt.Stringer {
+	str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer)
+	return str
+}
+
+// A Comment can be passed to Instruction.WithSource to add a comment
+// to an instruction.
+type Comment string
+
+func (s Comment) String() string {
+	return string(s)
+}
+
+// FDer represents a resource tied to an underlying file descriptor.
+// Used as a stand-in for e.g. ebpf.Map since that type cannot be
+// imported here and FD() is the only method we rely on.
+type FDer interface {
+	FD() int
+}
+
 // Instructions is an eBPF program.
 type Instructions []Instruction
 
+// Unmarshal unmarshals an Instructions from a binary instruction stream.
+// All instructions in insns are replaced by instructions decoded from r.
+func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error {
+	if len(*insns) > 0 {
+		*insns = nil
+	}
+
+	var offset uint64
+	for {
+		var ins Instruction
+		n, err := ins.Unmarshal(r, bo)
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("offset %d: %w", offset, err)
+		}
+
+		*insns = append(*insns, ins)
+		offset += n
+	}
+
+	return nil
+}
+
+// Name returns the name of the function insns belongs to, if any.
+func (insns Instructions) Name() string {
+	if len(insns) == 0 {
+		return ""
+	}
+	return insns[0].Symbol()
+}
+
 func (insns Instructions) String() string {
 	return fmt.Sprint(insns)
 }
 
+// Size returns the amount of bytes insns would occupy in binary form.
+func (insns Instructions) Size() uint64 {
+	var sum uint64
+	for _, ins := range insns {
+		sum += ins.Size()
+	}
+	return sum
+}
+
+// AssociateMap updates all Instructions that Reference the given symbol
+// to point to an existing Map m instead.
+//
+// Returns an error wrapping ErrUnreferencedSymbol if no references to symbol
+// are found in insns. If symbol is anything other than the symbol name of a
+// map (e.g. a bpf2bpf subprogram), an error is returned.
+func (insns Instructions) AssociateMap(symbol string, m FDer) error {
+	if symbol == "" {
+		return errors.New("empty symbol")
+	}
+
+	var found bool
+	for i := range insns {
+		ins := &insns[i]
+		if ins.Reference() != symbol {
+			continue
+		}
+
+		if err := ins.AssociateMap(m); err != nil {
+			return err
+		}
+
+		found = true
+	}
+
+	if !found {
+		return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
+	}
+
+	return nil
+}
+
 // RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
 //
-// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
+// Returns ErrUnreferencedSymbol if the symbol isn't used.
+//
+// Deprecated: use AssociateMap instead.
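As a sketch of the FDer escape hatch described above (the wrapper type, the symbol "my_map" and the descriptor value are assumptions): a caller holding only a raw file descriptor can still satisfy AssociateMap.

// rawFD wraps a plain file descriptor so it implements FDer.
type rawFD int

func (r rawFD) FD() int { return int(r) }

// associateByFD resolves every instruction referencing "my_map" to fd.
func associateByFD(insns Instructions, fd int) error {
	return insns.AssociateMap("my_map", rawFD(fd))
}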
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { if symbol == "" { return errors.New("empty symbol") } - found := false + var found bool for i := range insns { ins := &insns[i] - if ins.Reference != symbol { + if ins.Reference() != symbol { continue } - if err := ins.RewriteMapPtr(fd); err != nil { - return err + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") } + ins.encodeMapFD(fd) + found = true } if !found { - return &unreferencedSymbolError{symbol} + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) } return nil @@ -317,31 +553,61 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) { offsets := make(map[string]int) for i, ins := range insns { - if ins.Symbol == "" { + if ins.Symbol() == "" { continue } - if _, ok := offsets[ins.Symbol]; ok { - return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol) + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) } - offsets[ins.Symbol] = i + offsets[ins.Symbol()] = i } return offsets, nil } +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. + continue + } + + if ins.Reference() == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) + } + + sort.Strings(result) + return result +} + // ReferenceOffsets returns the set of references and their offset in // the instructions. func (insns Instructions) ReferenceOffsets() map[string][]int { offsets := make(map[string][]int) for i, ins := range insns { - if ins.Reference == "" { + if ins.Reference() == "" { continue } - offsets[ins.Reference] = append(offsets[ins.Reference], i) + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) } return offsets @@ -392,18 +658,36 @@ func (insns Instructions) Format(f fmt.State, c rune) { iter := insns.Iterate() for iter.Next() { - if iter.Ins.Symbol != "" { - fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol) + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } } fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) } } // Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. 
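A minimal sketch of the Symbol/Reference mechanism that Marshal resolves (registers, constants and the "exit" label are arbitrary): the jump is created with Offset -1 and a reference, and marshaling patches the offset to land on the instruction carrying the matching symbol.

func exampleResolveJump(w io.Writer) error {
	insns := Instructions{
		Mov.Imm(R0, 0),
		JEq.Imm(R1, 0, "exit"), // Offset stays -1 until Marshal patches it
		Mov.Imm(R0, 1),
		Return().WithSymbol("exit"), // jump target
	}
	return insns.Marshal(w, binary.LittleEndian)
}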
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + for i, ins := range insns { - _, err := ins.Marshal(w, bo) - if err != nil { + if _, err := ins.Marshal(w, bo); err != nil { return fmt.Errorf("instruction %d: %w", i, err) } } @@ -429,6 +713,95 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil } +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. +// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. + symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. + iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. +func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil +} + // Iterate allows iterating a BPF program while keeping track of // various offsets. 
// @@ -464,13 +837,6 @@ func (iter *InstructionIterator) Next() bool { return true } -type bpfInstruction struct { - OpCode OpCode - Registers bpfRegisters - Offset int16 - Constant int32 -} - type bpfRegisters uint8 func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { @@ -484,28 +850,10 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro } } -func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) { - switch bo { - case binary.LittleEndian: - return Register(r & 0xF), Register(r >> 4), nil - case binary.BigEndian: - return Register(r >> 4), Register(r & 0xf), nil - default: - return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo) - } -} - -type unreferencedSymbolError struct { - symbol string -} - -func (use *unreferencedSymbolError) Error() string { - return fmt.Sprintf("unreferenced symbol %s", use.symbol) -} - // IsUnreferencedSymbol returns true if err was caused by // an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). func IsUnreferencedSymbol(err error) bool { - _, ok := err.(*unreferencedSymbolError) - return ok + return errors.Is(err, ErrUnreferencedSymbol) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go index 7757179de649..e31e42cac52c 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/jump.go @@ -60,50 +60,68 @@ func (op JumpOp) Op(source Source) OpCode { return OpCode(JumpClass).SetJumpOp(op).SetSource(source) } -// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled. +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} - } + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), - Dst: dst, - Offset: -1, - Constant: int64(value), - Reference: label, - } + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) } -// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Reg(dst, src Register, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} - } + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. 
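For comparison, a small sketch of the two comparison widths (register, value and label are arbitrary): the classic forms compare all 64 bits with the immediate sign extended, while the Jump32Class variants added in kernel 5.1 compare only the low 32 bits.

func exampleJumpWidths() (Instruction, Instruction) {
	wide := JGT.Imm(R2, 8, "drop")     // JumpClass: 64-bit compare
	narrow := JGT.Imm32(R2, 8, "drop") // Jump32Class: low 32 bits only
	return wide, narrow
}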
+func (op JumpOp) Reg32(dst, src Register, label string) Instruction { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), - Dst: dst, - Src: src, - Offset: -1, - Reference: label, + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call || op == Ja { + return InvalidOpCode } + + return OpCode(class).SetJumpOp(op).SetSource(source) } // Label adjusts PC to the address of the label. func (op JumpOp) Label(label string) Instruction { if op == Call { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(Call), - Src: PseudoCall, - Constant: -1, - Reference: label, - } + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) } return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op), - Offset: -1, - Reference: label, - } + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/metadata.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 000000000000..dd368a936036 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. +type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. +// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. +func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. 
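A short sketch of the intended key discipline (the key type and value are illustrative): callers use an unexported zero-sized struct as the key, mirroring symbolMeta in instruction.go, so entries from different packages can never collide, and setting a key copies list elements only as far as needed.

type noteMeta struct{}

// withNote attaches a note to a copy of m; the original list is untouched.
func withNote(m Metadata, note string) Metadata {
	m.Set(noteMeta{}, note)
	return m
}

func noteOf(m Metadata) string {
	s, _ := m.Get(noteMeta{}).(string)
	return s
}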
+func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go index 6edc3cf5917d..b11917e18bbc 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -7,14 +7,6 @@ import ( //go:generate stringer -output opcode_string.go -type=Class -type encoding int - -const ( - unknownEncoding encoding = iota - loadOrStore - jumpOrALU -) - // Class of operations // // msb lsb @@ -26,31 +18,52 @@ type Class uint8 const classMask OpCode = 0x07 const ( - // LdClass load memory + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. LdClass Class = 0x00 - // LdXClass load memory from constant + // LdXClass loads memory into registers. LdXClass Class = 0x01 - // StClass load register from memory + // StClass stores immediate values to memory. StClass Class = 0x02 - // StXClass load register from constant + // StXClass stores registers to memory. StXClass Class = 0x03 - // ALUClass arithmetic operators + // ALUClass describes arithmetic operators. ALUClass Class = 0x04 - // JumpClass jump operators + // JumpClass describes jump operators. JumpClass Class = 0x05 - // ALU64Class arithmetic in 64 bit mode + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. + Jump32Class Class = 0x06 + // ALU64Class describes arithmetic operators in 64-bit mode. ALU64Class Class = 0x07 ) -func (cls Class) encoding() encoding { - switch cls { - case LdClass, LdXClass, StClass, StXClass: - return loadOrStore - case ALU64Class, ALUClass, JumpClass: - return jumpOrALU - default: - return unknownEncoding - } +// IsLoad checks if this is either LdClass or LdXClass. +func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. +func (cls Class) IsALU() bool { + return cls == ALUClass || cls == ALU64Class +} + +// IsJump checks if this is either JumpClass or Jump32Class. +func (cls Class) IsJump() bool { + return cls == JumpClass || cls == Jump32Class +} + +func (cls Class) isJumpOrALU() bool { + return cls.IsJump() || cls.IsALU() } // OpCode is a packed eBPF opcode. @@ -86,7 +99,7 @@ func (op OpCode) Class() Class { // Mode returns the mode for load and store operations. func (op OpCode) Mode() Mode { - if op.Class().encoding() != loadOrStore { + if !op.Class().isLoadOrStore() { return InvalidMode } return Mode(op & modeMask) @@ -94,7 +107,7 @@ func (op OpCode) Mode() Mode { // Size returns the size for load and store operations. func (op OpCode) Size() Size { - if op.Class().encoding() != loadOrStore { + if !op.Class().isLoadOrStore() { return InvalidSize } return Size(op & sizeMask) @@ -102,7 +115,7 @@ func (op OpCode) Size() Size { // Source returns the source for branch and ALU operations. 
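A quick sketch of what the predicates above collapse (the checks are arbitrary): every former per-class switch case reduces to a single boolean test that also covers the new Jump32Class.

func exampleClassPredicates() {
	_ = LdXClass.IsLoad()    // true
	_ = StClass.IsStore()    // true
	_ = ALU64Class.IsALU()   // true
	_ = Jump32Class.IsJump() // true
}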
func (op OpCode) Source() Source { - if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { + if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { return InvalidSource } return Source(op & sourceMask) @@ -110,7 +123,7 @@ func (op OpCode) Source() Source { // ALUOp returns the ALUOp. func (op OpCode) ALUOp() ALUOp { - if op.Class().encoding() != jumpOrALU { + if !op.Class().IsALU() { return InvalidALUOp } return ALUOp(op & aluMask) @@ -125,18 +138,27 @@ func (op OpCode) Endianness() Endianness { } // JumpOp returns the JumpOp. +// Returns InvalidJumpOp if it doesn't encode a jump. func (op OpCode) JumpOp() JumpOp { - if op.Class().encoding() != jumpOrALU { + if !op.Class().IsJump() { return InvalidJumpOp } - return JumpOp(op & jumpMask) + + jumpOp := JumpOp(op & jumpMask) + + // Some JumpOps are only supported by JumpClass, not Jump32Class. + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) { + return InvalidJumpOp + } + + return jumpOp } // SetMode sets the mode on load and store operations. // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetMode(mode Mode) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { + if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { return InvalidOpCode } return (op & ^modeMask) | OpCode(mode) @@ -146,7 +168,7 @@ func (op OpCode) SetMode(mode Mode) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetSize(size Size) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { + if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { return InvalidOpCode } return (op & ^sizeMask) | OpCode(size) @@ -156,7 +178,7 @@ func (op OpCode) SetSize(size Size) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetSource(source Source) OpCode { - if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { + if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { return InvalidOpCode } return (op & ^sourceMask) | OpCode(source) @@ -166,8 +188,7 @@ func (op OpCode) SetSource(source Source) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetALUOp(alu ALUOp) OpCode { - class := op.Class() - if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { + if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { return InvalidOpCode } return (op & ^aluMask) | OpCode(alu) @@ -177,17 +198,25 @@ func (op OpCode) SetALUOp(alu ALUOp) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetJumpOp(jump JumpOp) OpCode { - if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { + if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + + newOp := (op & ^jumpMask) | OpCode(jump) + + // Check newOp is legal. 
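+	// (newOp.JumpOp() reports InvalidJumpOp for combinations such as
+	// Exit, Call or Ja on Jump32Class, which have no 32-bit form.)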
+ if newOp.JumpOp() == InvalidJumpOp { return InvalidOpCode } - return (op & ^jumpMask) | OpCode(jump) + + return newOp } func (op OpCode) String() string { var f strings.Builder - switch class := op.Class(); class { - case LdClass, LdXClass, StClass, StXClass: + switch class := op.Class(); { + case class.isLoadOrStore(): f.WriteString(strings.TrimSuffix(class.String(), "Class")) mode := op.Mode() @@ -204,7 +233,7 @@ func (op OpCode) String() string { f.WriteString("B") } - case ALU64Class, ALUClass: + case class.IsALU(): f.WriteString(op.ALUOp().String()) if op.ALUOp() == Swap { @@ -218,8 +247,13 @@ func (op OpCode) String() string { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } - case JumpClass: + case class.IsJump(): f.WriteString(op.JumpOp().String()) + + if class == Jump32Class { + f.WriteString("32") + } + if jop := op.JumpOp(); jop != Exit && jop != Call { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go index 079ce1db0b83..58bc3e7e7f08 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -14,25 +14,17 @@ func _() { _ = x[StXClass-3] _ = x[ALUClass-4] _ = x[JumpClass-5] + _ = x[Jump32Class-6] _ = x[ALU64Class-7] } -const ( - _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" - _Class_name_1 = "ALU64Class" -) +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" -var ( - _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} -) +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} func (i Class) String() string { - switch { - case 0 <= i && i <= 5: - return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] - case i == 7: - return _Class_name_1 - default: + if i >= Class(len(_Class_index)-1) { return "Class(" + strconv.FormatInt(int64(i), 10) + ")" } + return _Class_name[_Class_index[i]:_Class_index[i+1]] } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go index 76cb44bffc71..dd5d44f1c191 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/asm/register.go @@ -38,6 +38,7 @@ const ( PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC ) func (r Register) String() string { diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 000000000000..a5969332aaa8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,897 @@ +package btf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. 
+var (
+	ErrNotSupported   = internal.ErrNotSupported
+	ErrNotFound       = errors.New("not found")
+	ErrNoExtendedInfo = errors.New("no extended info")
+)
+
+// ID represents the unique ID of a BTF object.
+type ID = sys.BTFID
+
+// Spec represents decoded BTF.
+type Spec struct {
+	// Data from .BTF.
+	rawTypes []rawType
+	strings  *stringTable
+
+	// All types contained by the spec. For the base type, the position of
+	// a type in the slice is its ID.
+	types types
+
+	// Type IDs indexed by type.
+	typeIDs map[Type]TypeID
+
+	// Types indexed by essential name.
+	// Includes all struct flavors and types with the same name.
+	namedTypes map[essentialName][]Type
+
+	byteOrder binary.ByteOrder
+}
+
+type btfHeader struct {
+	Magic   uint16
+	Version uint8
+	Flags   uint8
+	HdrLen  uint32
+
+	TypeOff   uint32
+	TypeLen   uint32
+	StringOff uint32
+	StringLen uint32
+}
+
+// typeStart returns the offset from the beginning of the .BTF section
+// to the start of its type entries.
+func (h *btfHeader) typeStart() int64 {
+	return int64(h.HdrLen + h.TypeOff)
+}
+
+// stringStart returns the offset from the beginning of the .BTF section
+// to the start of its string table.
+func (h *btfHeader) stringStart() int64 {
+	return int64(h.HdrLen + h.StringOff)
+}
+
+// LoadSpec opens file and calls LoadSpecFromReader on it.
+func LoadSpec(file string) (*Spec, error) {
+	fh, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	return LoadSpecFromReader(fh)
+}
+
+// LoadSpecFromReader reads from an ELF or a raw BTF blob.
+//
+// Returns ErrNotFound if reading from an ELF which contains no BTF.
+func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
+	file, err := internal.NewSafeELFFile(rd)
+	if err != nil {
+		if bo := guessRawBTFByteOrder(rd); bo != nil {
+			// Try to parse a naked BTF blob. This will return an error if
+			// we encounter a Datasec, since we can't fix it up.
+			spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
+			return spec, err
+		}
+
+		return nil, err
+	}
+
+	return loadSpecFromELF(file)
+}
+
+// LoadSpecAndExtInfosFromReader reads from an ELF.
+//
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
+// Returns ErrNotFound if the ELF contains no BTF.
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
+	file, err := internal.NewSafeELFFile(rd)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	spec, err := loadSpecFromELF(file)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
+	if err != nil && !errors.Is(err, ErrNotFound) {
+		return nil, nil, err
+	}
+
+	return spec, extInfos, nil
+}
+
+// variableOffsets extracts all symbol offsets from an ELF and indexes them by
+// section and variable name.
+//
+// References to variables in BTF data sections carry unsigned 32-bit offsets.
+// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
+// beyond this range. Since these symbols cannot be described by BTF info,
+// ignore them here.
+func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) { + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + variableOffsets := make(map[variable]uint32) + for _, symbol := range symbols { + if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if symbol.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. + continue + } + + if int(symbol.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) + } + + secName := file.Sections[symbol.Section].Name + variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) + } + + return variableOffsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + vars, err := variableOffsets(file) + if err != nil { + return nil, err + } + + if btfSection.ReaderAt == nil { + return nil, fmt.Errorf("compressed BTF is not supported") + } + + rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil) + if err != nil { + return nil, err + } + + err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars) + if err != nil { + return nil, err + } + + return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil) +} + +func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, + baseTypes types, baseStrings *stringTable) (*Spec, error) { + + rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings) + if err != nil { + return nil, err + } + + return inflateSpec(rawTypes, rawStrings, bo, baseTypes) +} + +func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder, + baseTypes types) (*Spec, error) { + + types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings) + if err != nil { + return nil, err + } + + typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes))) + + return &Spec{ + rawTypes: rawTypes, + namedTypes: typesByName, + typeIDs: typeIDs, + types: types, + strings: rawStrings, + byteOrder: bo, + }, nil +} + +func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) { + namedTypes := 0 + for _, typ := range types { + if typ.TypeName() != "" { + // Do a pre-pass to figure out how big types by name has to be. + // Most types have unique names, so it's OK to ignore essentialName + // here. + namedTypes++ + } + } + + typeIDs := make(map[Type]TypeID, len(types)) + typesByName := make(map[essentialName][]Type, namedTypes) + + for i, typ := range types { + if name := newEssentialName(typ.TypeName()); name != "" { + typesByName[name] = append(typesByName[name], typ) + } + typeIDs[typ] = TypeID(i) + typeIDOffset + } + + return typeIDs, typesByName +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. 
Returns an error wrapping ErrNotSupported if BTF is not enabled. +func LoadKernelSpec() (*Spec, error) { + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + return loadRawSpec(fh, internal.NativeEndian, nil, nil) + } + + file, err := findVMLinux() + if err != nil { + return nil, err + } + defer file.Close() + + return loadSpecFromELF(file) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. +func findVMLinux() (*internal.SafeELFFile, error) { + release, err := internal.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} + +// parseBTFHeader parses the header of the .BTF section. +func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { + var header btfHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, errors.New("header length shorter than btfHeader size") + } + + if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { + return nil, fmt.Errorf("header padding: %v", err) + } + + return &header, nil +} + +func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { + buf := new(bufio.Reader) + for _, bo := range []binary.ByteOrder{ + binary.LittleEndian, + binary.BigEndian, + } { + buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64)) + if _, err := parseBTFHeader(buf, bo); err == nil { + return bo + } + } + + return nil +} + +// parseBTF reads a .BTF section into memory and parses it into a list of +// raw types and a string table. 
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) { + buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) + header, err := parseBTFHeader(buf, bo) + if err != nil { + return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) + } + + rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)), + baseStrings) + if err != nil { + return nil, nil, fmt.Errorf("can't read type names: %w", err) + } + + buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) + rawTypes, err := readTypes(buf, bo, header.TypeLen) + if err != nil { + return nil, nil, fmt.Errorf("can't read types: %w", err) + } + + return rawTypes, rawStrings, nil +} + +type variable struct { + section string + name string +} + +func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { + for i, rawType := range rawTypes { + if rawType.Kind() != kindDatasec { + continue + } + + name, err := rawStrings.Lookup(rawType.NameOff) + if err != nil { + return err + } + + if name == ".kconfig" || name == ".ksyms" { + return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) + } + + if rawTypes[i].SizeType != 0 { + continue + } + + size, ok := sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + rawTypes[i].SizeType = size + + secinfos := rawType.data.([]btfVarSecinfo) + for j, secInfo := range secinfos { + id := int(secInfo.Type - 1) + if id >= len(rawTypes) { + return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) + } + + varName, err := rawStrings.Lookup(rawTypes[id].NameOff) + if err != nil { + return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) + } + + offset, ok := variableOffsets[variable{name, varName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) + } + + secinfos[j].Offset = offset + } + } + + return nil +} + +// Copy creates a copy of Spec. +func (s *Spec) Copy() *Spec { + types := copyTypes(s.types, nil) + + typeIDOffset := TypeID(0) + if len(s.types) != 0 { + typeIDOffset = s.typeIDs[s.types[0]] + } + typeIDs, typesByName := indexTypes(types, typeIDOffset) + + // NB: Other parts of spec are not copied since they are immutable. + return &Spec{ + s.rawTypes, + s.strings, + types, + typeIDs, + typesByName, + s.byteOrder, + } +} + +type marshalOpts struct { + ByteOrder binary.ByteOrder + StripFuncLinkage bool +} + +func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { + var ( + buf bytes.Buffer + header = new(btfHeader) + headerLen = binary.Size(header) + ) + + // Reserve space for the header. We have to write it last since + // we don't know the size of the type section yet. + _, _ = buf.Write(make([]byte, headerLen)) + + // Write type section, just after the header. + for _, raw := range s.rawTypes { + switch { + case opts.StripFuncLinkage && raw.Kind() == kindFunc: + raw.SetLinkage(StaticFunc) + } + + if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { + return nil, fmt.Errorf("can't marshal BTF: %w", err) + } + } + + typeLen := uint32(buf.Len() - headerLen) + + // Write string section after type section. + stringsLen := s.strings.Length() + buf.Grow(stringsLen) + if err := s.strings.Marshal(&buf); err != nil { + return nil, err + } + + // Fill out the header, and write it out. 
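+	// (TypeOff and StringOff are relative to the end of the header, so
+	// TypeOff 0 starts the type section right after it and
+	// StringOff == typeLen starts the string table right after the types.)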
+	header = &btfHeader{
+		Magic:     btfMagic,
+		Version:   1,
+		Flags:     0,
+		HdrLen:    uint32(headerLen),
+		TypeOff:   0,
+		TypeLen:   typeLen,
+		StringOff: typeLen,
+		StringLen: uint32(stringsLen),
+	}
+
+	raw := buf.Bytes()
+	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
+	if err != nil {
+		return nil, fmt.Errorf("can't write header: %v", err)
+	}
+
+	return raw, nil
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+	if len(p) != len(sw) {
+		return 0, errors.New("size doesn't match")
+	}
+
+	return copy(sw, p), nil
+}
+
+// TypeByID returns the BTF Type with the given type ID.
+//
+// Returns an error wrapping ErrNotFound if a Type with the given ID
+// does not exist in the Spec.
+func (s *Spec) TypeByID(id TypeID) (Type, error) {
+	return s.types.ByID(id)
+}
+
+// TypeID returns the ID for a given Type.
+//
+// Returns an error wrapping ErrNotFound if the type isn't part of the Spec.
+func (s *Spec) TypeID(typ Type) (TypeID, error) {
+	if _, ok := typ.(*Void); ok {
+		// Equality is weird for void, since it is a zero sized type.
+		return 0, nil
+	}
+
+	id, ok := s.typeIDs[typ]
+	if !ok {
+		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+	}
+
+	return id, nil
+}
+
+// AnyTypesByName returns a list of BTF Types with the given name.
+//
+// If the BTF blob describes multiple compilation units like vmlinux, multiple
+// Types with the same name and kind can exist, but might not describe the same
+// data structure.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
+	types := s.namedTypes[newEssentialName(name)]
+	if len(types) == 0 {
+		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+	}
+
+	// Return a copy to prevent changes to namedTypes.
+	result := make([]Type, 0, len(types))
+	for _, t := range types {
+		// Match against the full name, not just the essential one
+		// in case the type being looked up is a struct flavor.
+		if t.TypeName() == name {
+			result = append(result, t)
+		}
+	}
+	return result, nil
+}
+
+// AnyTypeByName returns a Type with the given name.
+//
+// Returns an error if multiple types of that name exist.
+func (s *Spec) AnyTypeByName(name string) (Type, error) {
+	types, err := s.AnyTypesByName(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(types) > 1 {
+		return nil, fmt.Errorf("found multiple types: %v", types)
+	}
+
+	return types[0], nil
+}
+
+// TypeByName searches for a Type with a specific name. Since multiple
+// Types with the same name can exist, the parameter typ is taken to
+// narrow down the search in case of a clash.
+//
+// typ must be a non-nil pointer to an implementation of a Type.
+// On success, the address of the found Type will be copied to typ.
+//
+// Returns an error wrapping ErrNotFound if no matching
+// Type exists in the Spec. If multiple candidates are found,
+// an error is returned.
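A brief usage sketch (loading the kernel spec and the name "task_struct" are assumptions): the pointer-to-pointer argument lets TypeByName filter candidates by kind and write the match back.

func findTaskStruct() (*Struct, error) {
	spec, err := LoadKernelSpec()
	if err != nil {
		return nil, err
	}

	var task *Struct // only *Struct candidates are considered
	if err := spec.TypeByName("task_struct", &task); err != nil {
		return nil, err
	}
	return task, nil
}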
+func (s *Spec) TypeByName(name string, typ interface{}) error { + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + + wanted := typPtr.Type() + if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s: multiple candidates for %T", name, typ) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("type %s: %w", name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + return loadRawSpec(r, internal.NativeEndian, base.types, base.strings) +} + +// TypesIterator iterates over types of a given spec. +type TypesIterator struct { + spec *Spec + index int + // The last visited type in the spec. + Type Type +} + +// Iterate returns the types iterator. +func (s *Spec) Iterate() *TypesIterator { + return &TypesIterator{spec: s, index: 0} +} + +// Next returns true as long as there are any remaining types. +func (iter *TypesIterator) Next() bool { + if len(iter.spec.types) <= iter.index { + return false + } + + iter.Type = iter.spec.types[iter.index] + iter.index++ + return true +} + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 +} + +// NewHandle loads BTF into the kernel. +// +// Returns ErrNotSupported if BTF is not supported. +func NewHandle(spec *Spec) (*Handle, error) { + if err := haveBTF(); err != nil { + return nil, err + } + + if spec.byteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) + } + + btf, err := spec.marshal(marshalOpts{ + ByteOrder: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + }) + if err != nil { + return nil, fmt.Errorf("can't marshal BTF: %w", err) + } + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + } + + fd, err := sys.BtfLoad(attr) + if err != nil { + logBuf := make([]byte, 64*1024) + attr.BtfLogBuf = sys.NewSlicePointer(logBuf) + attr.BtfLogSize = uint32(len(logBuf)) + attr.BtfLogLevel = 1 + // NB: The syscall will never return ENOSPC as of 5.18-rc4. + _, _ = sys.BtfLoad(attr) + return nil, internal.ErrorWithLog(err, logBuf) + } + + return &Handle{fd, attr.BtfSize}, nil +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible. +// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. 
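A short sketch of the handle lifecycle (the flow is illustrative): look up loaded BTF by ID, re-parse it into a Spec, and release the descriptor when done.

func inspectByID(id ID) (*Spec, error) {
	h, err := NewHandleFromID(id)
	if err != nil {
		return nil, err
	}
	defer h.Close()

	return h.Spec(nil) // nil base: not split BTF
}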
+func NewHandleFromID(id ID) (*Handle, error) { + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get FD for ID %d: %w", id, err) + } + + info, err := newHandleInfoFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + return &Handle{fd, info.size}, nil +} + +// Spec parses the kernel BTF into Go types. +// +// base is used to decode split BTF and may be nil. +func (h *Handle) Spec(base *Spec) (*Spec, error) { + var btfInfo sys.BtfInfo + btfBuffer := make([]byte, h.size) + btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) + + if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { + return nil, err + } + + var baseTypes types + var baseStrings *stringTable + if base != nil { + baseTypes = base.types + baseStrings = base.strings + } + + return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings) +} + +// Close destroys the handle. +// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + if h == nil { + return nil + } + + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + return h.fd.Int() +} + +// Info returns metadata about the handle. +func (h *Handle) Info() (*HandleInfo, error) { + return newHandleInfoFromFD(h.fd) +} + +func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { + const minHeaderLength = 24 + + typesLen := uint32(binary.Size(types)) + header := btfHeader{ + Magic: btfMagic, + Version: 1, + HdrLen: minHeaderLength, + TypeOff: 0, + TypeLen: typesLen, + StringOff: typesLen, + StringLen: uint32(len(strings)), + } + + buf := new(bytes.Buffer) + _ = binary.Write(buf, bo, &header) + _ = binary.Write(buf, bo, types) + buf.Write(strings) + + return buf.Bytes() +} + +var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { + var ( + types struct { + Integer btfType + Var btfType + btfVar struct{ Linkage uint32 } + } + strings = []byte{0, 'a', 0} + ) + + // We use a BTF_KIND_VAR here, to make sure that + // the kernel understands BTF at least as well as we + // do. BTF_KIND_VAR was introduced ~5.1. + types.Integer.SetKind(kindPointer) + types.Var.NameOff = 1 + types.Var.SetKind(kindVar) + types.Var.SizeType = 1 + + btf := marshalBTF(&types, strings, internal.NativeEndian) + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + }) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: loading the program + // might still succeed without BTF. 
+ return internal.ErrNotSupported + } + if err != nil { + return err + } + + fd.Close() + return nil +}) + +var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { + if err := haveBTF(); err != nil { + return err + } + + var ( + types struct { + FuncProto btfType + Func btfType + } + strings = []byte{0, 'a', 0} + ) + + types.FuncProto.SetKind(kindFuncProto) + types.Func.SetKind(kindFunc) + types.Func.SizeType = 1 // aka FuncProto + types.Func.NameOff = 1 + types.Func.SetLinkage(GlobalFunc) + + btf := marshalBTF(&types, strings, internal.NativeEndian) + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + fd.Close() + return nil +}) diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf_types.go similarity index 73% rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf_types.go index d98c73ca594a..4810180494e6 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -130,13 +130,22 @@ func mask(len uint32) uint32 { return (1 << len) - 1 } +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + func (bt *btfType) info(len, shift uint32) uint32 { - return (bt.Info >> shift) & mask(len) + return readBits(bt.Info, len, shift) } func (bt *btfType) setInfo(value, len, shift uint32) { - bt.Info &^= mask(len) << shift - bt.Info |= (value & mask(len)) << shift + bt.Info = writeBits(bt.Info, len, shift, value) } func (bt *btfType) Kind() btfKind { @@ -177,6 +186,10 @@ func (bt *btfType) Size() uint32 { return bt.SizeType } +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + type rawType struct { btfType data interface{} @@ -194,6 +207,50 @@ func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { return binary.Write(w, bo, rt.data) } +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
+// b = bits (bitfields)
+type btfInt struct {
+	Raw uint32
+}
+
+const (
+	btfIntEncodingLen   = 4
+	btfIntEncodingShift = 24
+	btfIntOffsetLen     = 8
+	btfIntOffsetShift   = 16
+	btfIntBitsLen       = 8
+	btfIntBitsShift     = 0
+)
+
+func (bi btfInt) Encoding() IntEncoding {
+	return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
+}
+
+func (bi *btfInt) SetEncoding(e IntEncoding) {
+	bi.Raw = writeBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift, uint32(e))
+}
+
+func (bi btfInt) Offset() Bits {
+	return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
+}
+
+func (bi *btfInt) SetOffset(offset uint32) {
+	bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
+}
+
+func (bi btfInt) Bits() Bits {
+	return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
+}
+
+func (bi *btfInt) SetBits(bits byte) {
+	bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
+}
+
 type btfArray struct {
 	Type      TypeID
 	IndexType TypeID
@@ -226,11 +283,14 @@ type btfParam struct {
 	Type TypeID
 }
 
-func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
-	var (
-		header btfType
-		types  []rawType
-	)
+func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
+	var header btfType
+	// Because of the interleaving between types and struct members it is
+	// difficult to precompute the number of raw types this will parse, so
+	// this guess is a good first estimate.
+	sizeOfbtfType := uintptr(binary.Size(btfType{}))
+	tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
+	types := make([]rawType, 0, tyMaxCount)
 
 	for id := TypeID(1); ; id++ {
 		if err := binary.Read(r, bo, &header); err == io.EOF {
@@ -242,7 +302,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
 		var data interface{}
 		switch header.Kind() {
 		case kindInt:
-			data = new(uint32)
+			data = new(btfInt)
 		case kindPointer:
 		case kindArray:
 			data = new(btfArray)
@@ -281,7 +341,3 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
 		types = append(types, rawType{header, data})
 	}
 }
-
-func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
-	return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
-}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go
rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/core.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/core.go
similarity index 65%
rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/core.go
rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/core.go
index d02df9d50bb3..c48754809355 100644
--- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/core.go
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/core.go
@@ -1,11 +1,11 @@
 package btf
 
 import (
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"math"
 	"reflect"
-	"sort"
 	"strconv"
 	"strings"
 
@@ -17,50 +17,58 @@ import (
 // COREFixup is the result of computing a CO-RE relocation for a target.
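// Each fixup adjusts a single instruction: Apply either rewrites the
// instruction's offset or immediate to the target value, or swaps in an
// invalid call when the fixup is poisoned.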
type COREFixup struct { - Kind COREKind - Local uint32 - Target uint32 - Poison bool + kind coreKind + local uint32 + target uint32 + // True if there is no valid fixup. The instruction is replaced with an + // invalid dummy. + poison bool + // True if the validation of the local value should be skipped. Used by + // some kinds of bitfield relocations. + skipLocalValidation bool } -func (f COREFixup) equal(other COREFixup) bool { - return f.Local == other.Local && f.Target == other.Target +func (f *COREFixup) equal(other COREFixup) bool { + return f.local == other.local && f.target == other.target } -func (f COREFixup) String() string { - if f.Poison { - return fmt.Sprintf("%s=poison", f.Kind) +func (f *COREFixup) String() string { + if f.poison { + return fmt.Sprintf("%s=poison", f.kind) } - return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target) + return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) } -func (f COREFixup) apply(ins *asm.Instruction) error { - if f.Poison { - return errors.New("can't poison individual instruction") +func (f *COREFixup) Apply(ins *asm.Instruction) error { + if f.poison { + const badRelo = 0xbad2310 + + *ins = asm.BuiltinFunc(badRelo).Call() + return nil } switch class := ins.OpCode.Class(); class { case asm.LdXClass, asm.StClass, asm.StXClass: - if want := int16(f.Local); want != ins.Offset { - return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want) + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) } - if f.Target > math.MaxInt16 { - return fmt.Errorf("offset %d exceeds MaxInt16", f.Target) + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) } - ins.Offset = int16(f.Target) + ins.Offset = int16(f.target) case asm.LdClass: if !ins.IsConstantLoad(asm.DWord) { return fmt.Errorf("not a dword-sized immediate load") } - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) } - ins.Constant = int64(f.Target) + ins.Constant = int64(f.target) case asm.ALUClass: if ins.OpCode.ALUOp() == asm.Swap { @@ -74,15 +82,15 @@ func (f COREFixup) apply(ins *asm.Instruction) error { return fmt.Errorf("invalid source %s", src) } - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) } - if f.Target > math.MaxInt32 { - return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target) + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) } - ins.Constant = int64(f.Target) + ins.Constant = int64(f.target) default: return fmt.Errorf("invalid class %s", class) @@ -92,57 +100,14 @@ func (f COREFixup) apply(ins *asm.Instruction) error { } func (f COREFixup) isNonExistant() bool { - return f.Kind.checksForExistence() && f.Target == 0 -} - -type COREFixups map[uint64]COREFixup - -// Apply a set of CO-RE relocations to a BPF program. 
-func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) { - if len(fs) == 0 { - cpy := make(asm.Instructions, len(insns)) - copy(cpy, insns) - return insns, nil - } - - cpy := make(asm.Instructions, 0, len(insns)) - iter := insns.Iterate() - for iter.Next() { - fixup, ok := fs[iter.Offset.Bytes()] - if !ok { - cpy = append(cpy, *iter.Ins) - continue - } - - ins := *iter.Ins - if fixup.Poison { - const badRelo = asm.BuiltinFunc(0xbad2310) - - cpy = append(cpy, badRelo.Call()) - if ins.OpCode.IsDWordLoad() { - // 64 bit constant loads occupy two raw bpf instructions, so - // we need to add another instruction as padding. - cpy = append(cpy, badRelo.Call()) - } - - continue - } - - if err := fixup.apply(&ins); err != nil { - return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err) - } - - cpy = append(cpy, ins) - } - - return cpy, nil + return f.kind.checksForExistence() && f.target == 0 } -// COREKind is the type of CO-RE relocation -type COREKind uint32 +// coreKind is the type of CO-RE relocation as specified in BPF source code. +type coreKind uint32 const ( - reloFieldByteOffset COREKind = iota /* field byte offset */ + reloFieldByteOffset coreKind = iota /* field byte offset */ reloFieldByteSize /* field size in bytes */ reloFieldExists /* field existence in target kernel */ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ @@ -156,7 +121,11 @@ const ( reloEnumvalValue /* enum value integer value */ ) -func (k COREKind) String() string { +func (k coreKind) checksForExistence() bool { + return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists +} + +func (k coreKind) String() string { switch k { case reloFieldByteOffset: return "byte_off" @@ -187,19 +156,28 @@ func (k COREKind) String() string { } } -func (k COREKind) checksForExistence() bool { - return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists -} - -func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { +// CORERelocate calculates the difference in types between local and target. +// +// Returns a list of fixups which can be applied to instructions to make them +// match the target type(s). +// +// Fixups are returned in the order of relos, e.g. fixup[i] is the solution +// for relos[i]. +func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) { if local.byteOrder != target.byteOrder { return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder) } - var ids []TypeID - relosByID := make(map[TypeID]coreRelos) - result := make(COREFixups, len(relos)) - for _, relo := range relos { + type reloGroup struct { + relos []*CORERelocation + // Position of each relocation in relos. + indices []int + } + + // Split relocations into per Type lists. + relosByType := make(map[Type]*reloGroup) + result := make([]COREFixup, len(relos)) + for i, relo := range relos { if relo.kind == reloTypeIDLocal { // Filtering out reloTypeIDLocal here makes our lives a lot easier // down the line, since it doesn't have a target at all. 
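+			// Its fixup can therefore be computed from the local spec alone.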
@@ -207,47 +185,42 @@ func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) } - result[uint64(relo.insnOff)] = COREFixup{ - relo.kind, - uint32(relo.typeID), - uint32(relo.typeID), - false, + id, err := local.TypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint32(id), + target: uint32(id), } continue } - relos, ok := relosByID[relo.typeID] + group, ok := relosByType[relo.typ] if !ok { - ids = append(ids, relo.typeID) + group = &reloGroup{} + relosByType[relo.typ] = group } - relosByID[relo.typeID] = append(relos, relo) + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) } - // Ensure we work on relocations in a deterministic order. - sort.Slice(ids, func(i, j int) bool { - return ids[i] < ids[j] - }) - - for _, id := range ids { - if int(id) >= len(local.types) { - return nil, fmt.Errorf("invalid type id %d", id) - } - - localType := local.types[id] - named, ok := localType.(NamedType) - if !ok || named.TypeName() == "" { + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) } - relos := relosByID[id] - targets := target.namedTypes[essentialName(named.TypeName())] - fixups, err := coreCalculateFixups(localType, targets, relos) + targets := target.namedTypes[newEssentialName(localTypeName)] + fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos) if err != nil { return nil, fmt.Errorf("relocate %s: %w", localType, err) } - for i, relo := range relos { - result[uint64(relo.insnOff)] = fixups[i] + for j, index := range group.indices { + result[index] = fixups[j] } } @@ -262,30 +235,30 @@ var errImpossibleRelocation = errors.New("impossible relocation") // // The best target is determined by scoring: the less poisoning we have to do // the better the target is. -func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]COREFixup, error) { - localID := local.ID() - local, err := copyType(local, skipQualifierAndTypedef) +func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) { + localID, err := localSpec.TypeID(local) if err != nil { - return nil, err + return nil, fmt.Errorf("local type ID: %w", err) } + local = Copy(local, UnderlyingType) bestScore := len(relos) var bestFixups []COREFixup for i := range targets { - targetID := targets[i].ID() - target, err := copyType(targets[i], skipQualifierAndTypedef) + targetID, err := targetSpec.TypeID(targets[i]) if err != nil { - return nil, err + return nil, fmt.Errorf("target type ID: %w", err) } + target := Copy(targets[i], UnderlyingType) score := 0 // lower is better fixups := make([]COREFixup, 0, len(relos)) for _, relo := range relos { - fixup, err := coreCalculateFixup(local, localID, target, targetID, relo) + fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo) if err != nil { return nil, fmt.Errorf("target %s: %w", target, err) } - if fixup.Poison || fixup.isNonExistant() { + if fixup.poison || fixup.isNonExistant() { score++ } fixups = append(fixups, fixup) @@ -307,17 +280,23 @@ func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]CO // the fixups agree with each other. 
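			// If two equally scored candidates disagree on any fixup, the
			// relocation is ambiguous and neither result can be trusted.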
for i, fixup := range bestFixups { if !fixup.equal(fixups[i]) { - return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation) + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) } } } if bestFixups == nil { // Nothing at all matched, probably because there are no suitable - // targets at all. Poison everything! + // targets at all. + // + // Poison everything except checksForExistence. bestFixups = make([]COREFixup, len(relos)) for i, relo := range relos { - bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true} + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } } } @@ -326,15 +305,18 @@ func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]CO // coreCalculateFixup calculates the fixup for a single local type, target type // and relocation. -func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) { +func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) { fixup := func(local, target uint32) (COREFixup, error) { - return COREFixup{relo.kind, local, target, false}, nil + return COREFixup{kind: relo.kind, local: local, target: target}, nil + } + fixupWithoutValidation := func(local, target uint32) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil } poison := func() (COREFixup, error) { if relo.kind.checksForExistence() { return fixup(1, 0) } - return COREFixup{relo.kind, 0, 0, true}, nil + return COREFixup{kind: relo.kind, poison: true}, nil } zero := COREFixup{} @@ -390,7 +372,20 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID return fixup(uint32(localValue.Value), uint32(targetValue.Value)) } - case reloFieldByteOffset, reloFieldByteSize, reloFieldExists: + case reloFieldSigned: + switch local.(type) { + case *Enum: + return fixup(1, 1) + case *Int: + return fixup( + uint32(local.(*Int).Encoding&Signed), + uint32(target.(*Int).Encoding&Signed), + ) + default: + return fixupWithoutValidation(0, 0) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64: if _, ok := target.(*Fwd); ok { // We can't relocate fields using a forward declaration, so // skip it. 
If a non-forward declaration is present in the BTF
@@ -406,12 +401,17 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
 			return zero, fmt.Errorf("target %s: %w", target, err)
 		}
 
+		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
+			f.skipLocalValidation = localField.bitfieldSize > 0
+			return f, err
+		}
+
 		switch relo.kind {
 		case reloFieldExists:
 			return fixup(1, 1)
 
 		case reloFieldByteOffset:
-			return fixup(localField.offset/8, targetField.offset/8)
+			return maybeSkipValidation(fixup(localField.offset, targetField.offset))
 
 		case reloFieldByteSize:
 			localSize, err := Sizeof(localField.Type)
@@ -423,9 +423,34 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
 			if err != nil {
 				return zero, err
 			}
+			return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))
+
+		case reloFieldLShiftU64:
+			var target uint32
+			if byteOrder == binary.LittleEndian {
+				targetSize, err := targetField.sizeBits()
+				if err != nil {
+					return zero, err
+				}
 
-			return fixup(uint32(localSize), uint32(targetSize))
+				target = uint32(64 - targetField.bitfieldOffset - targetSize)
+			} else {
+				loadWidth, err := Sizeof(targetField.Type)
+				if err != nil {
+					return zero, err
+				}
+
+				target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
+			}
+			return fixupWithoutValidation(0, target)
+
+		case reloFieldRShiftU64:
+			targetSize, err := targetField.sizeBits()
+			if err != nil {
+				return zero, err
+			}
+			return fixupWithoutValidation(0, uint32(64-targetSize))
 		}
 	}
 
@@ -462,7 +487,7 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
  */
 type coreAccessor []int
 
-func parseCoreAccessor(accessor string) (coreAccessor, error) {
+func parseCOREAccessor(accessor string) (coreAccessor, error) {
 	if accessor == "" {
 		return nil, fmt.Errorf("empty accessor")
 	}
@@ -508,18 +533,73 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
 	return &e.Values[i], nil
 }
 
+// coreField represents the position of a "child" of a composite type from the
+// start of that type.
+//
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field       end of field -/
 type coreField struct {
-	Type   Type
+	Type Type
+
+	// The position of the field from the start of the composite type in bytes.
 	offset uint32
+
+	// The offset of the bitfield in bits from the start of the field.
+	bitfieldOffset Bits
+
+	// The size of the bitfield in bits.
+	//
+	// Zero if the field is not a bitfield.
+	bitfieldSize Bits
+}
+
+func (cf *coreField) adjustOffsetToNthElement(n int) error {
+	size, err := Sizeof(cf.Type)
+	if err != nil {
+		return err
+	}
+
+	cf.offset += uint32(n) * uint32(size)
+	return nil
 }
 
-func adjustOffset(base uint32, t Type, n int) (uint32, error) {
-	size, err := Sizeof(t)
+func (cf *coreField) adjustOffsetBits(offset Bits) error {
+	align, err := alignof(cf.Type)
 	if err != nil {
-		return 0, err
+		return err
+	}
+
+	// We can compute the load offset by:
+	// 1) converting the bit offset to bytes with a flooring division.
+	// 2) dividing and multiplying that offset by the alignment, yielding the
+	//    load size aligned offset.
+	offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
+
+	// The number of bits remaining is the bit offset less the number of bits
+	// we can "skip" with the aligned offset.
+	cf.bitfieldOffset = offset - Bits(offsetBytes*8)
+
+	// We know that cf.offset is aligned to at least align since we get it
+	// from the compiler via BTF. Adding an aligned offsetBytes preserves the
+	// alignment.
+	cf.offset += offsetBytes
+	return nil
+}
+
+func (cf *coreField) sizeBits() (Bits, error) {
+	if cf.bitfieldSize > 0 {
+		return cf.bitfieldSize, nil
 	}
 
-	return base + (uint32(n) * uint32(size) * 8), nil
+	// Someone is trying to access a non-bitfield via a bit shift relocation.
+	// This happens when a field changes from a bitfield to a regular field
+	// between kernel versions. Synthesise the size to make the shifts work.
+	size, err := Sizeof(cf.Type)
+	if err != nil {
+		return 0, err
+	}
+	return Bits(size * 8), nil
 }
 
 // coreFindField descends into the local type using the accessor and tries to
@@ -527,32 +607,33 @@ func adjustOffset(base uint32, t Type, n int) (uint32, error) {
 //
 // Returns the field and the offset of the field from the start of
 // target in bits.
-func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
+func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
+	local := coreField{Type: localT}
+	target := coreField{Type: targetT}
+
 	// The first index is used to offset a pointer of the base type like
 	// when accessing an array.
-	localOffset, err := adjustOffset(0, local, localAcc[0])
-	if err != nil {
+	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
 		return coreField{}, coreField{}, err
 	}
 
-	targetOffset, err := adjustOffset(0, target, localAcc[0])
-	if err != nil {
+	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
 		return coreField{}, coreField{}, err
 	}
 
-	if err := coreAreMembersCompatible(local, target); err != nil {
+	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
 		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
 	}
 
 	var localMaybeFlex, targetMaybeFlex bool
-	for _, acc := range localAcc[1:] {
-		switch localType := local.(type) {
+	for i, acc := range localAcc[1:] {
+		switch localType := local.Type.(type) {
 		case composite:
 			// For composite types acc is used to find the field in the local type,
 			// and then we try to find a field in target with the same name.
 			localMembers := localType.members()
 			if acc >= len(localMembers) {
-				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
+				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
 			}
 
 			localMember := localMembers[acc]
@@ -563,13 +644,15 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
 			}
 
 			// This is an anonymous struct or union, ignore it.
- local = localMember.Type - localOffset += localMember.OffsetBits + local = coreField{ + Type: localMember.Type, + offset: local.offset + localMember.Offset.Bytes(), + } localMaybeFlex = false continue } - targetType, ok := target.(composite) + targetType, ok := target.Type.(composite) if !ok { return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) } @@ -579,20 +662,43 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie return coreField{}, coreField{}, err } - if targetMember.BitfieldSize > 0 { - return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported) + local = coreField{ + Type: localMember.Type, + offset: local.offset, + bitfieldSize: localMember.BitfieldSize, } - - local = localMember.Type localMaybeFlex = acc == len(localMembers)-1 - localOffset += localMember.OffsetBits - target = targetMember.Type + + target = coreField{ + Type: targetMember.Type, + offset: target.offset, + bitfieldSize: targetMember.BitfieldSize, + } targetMaybeFlex = last - targetOffset += targetMember.OffsetBits + + if local.bitfieldSize == 0 && target.bitfieldSize == 0 { + local.offset += localMember.Offset.Bytes() + target.offset += targetMember.Offset.Bytes() + break + } + + // Either of the members is a bitfield. Make sure we're at the + // end of the accessor. + if next := i + 1; next < len(localAcc[1:]) { + return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") + } + + if err := local.adjustOffsetBits(localMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetBits(targetMember.Offset); err != nil { + return coreField{}, coreField{}, err + } case *Array: // For arrays, acc is the index in the target. 
- targetType, ok := target.(*Array) + targetType, ok := target.Type.(*Array) if !ok { return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) } @@ -611,17 +717,23 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) } - local = localType.Type + local = coreField{ + Type: localType.Type, + offset: local.offset, + } localMaybeFlex = false - localOffset, err = adjustOffset(localOffset, local, acc) - if err != nil { + + if err := local.adjustOffsetToNthElement(acc); err != nil { return coreField{}, coreField{}, err } - target = targetType.Type + target = coreField{ + Type: targetType.Type, + offset: target.offset, + } targetMaybeFlex = false - targetOffset, err = adjustOffset(targetOffset, target, acc) - if err != nil { + + if err := target.adjustOffsetToNthElement(acc); err != nil { return coreField{}, coreField{}, err } @@ -629,12 +741,12 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) } - if err := coreAreMembersCompatible(local, target); err != nil { + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { return coreField{}, coreField{}, err } } - return coreField{local, localOffset}, coreField{target, targetOffset}, nil + return local, target, nil } // coreFindMember finds a member in a composite type while handling anonymous @@ -646,7 +758,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) { type offsetTarget struct { composite - offset uint32 + offset Bits } targets := []offsetTarget{{typ, 0}} @@ -670,7 +782,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) { for j, member := range members { if member.Name == name { // NB: This is safe because member is a copy. 
- member.OffsetBits += target.offset + member.Offset += target.offset return member, j == len(members)-1, nil } @@ -685,7 +797,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) { return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) } - targets = append(targets, offsetTarget{comp, target.offset + member.OffsetBits}) + targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) } } @@ -704,9 +816,9 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal return nil, nil, errImpossibleRelocation } - localName := essentialName(localValue.Name) + localName := newEssentialName(localValue.Name) for i, targetValue := range targetEnum.Values { - if essentialName(targetValue.Name) != localName { + if newEssentialName(targetValue.Name) != localName { continue } @@ -759,15 +871,9 @@ func coreAreTypesCompatible(localType Type, targetType Type) error { } switch lv := (localType).(type) { - case *Void, *Struct, *Union, *Enum, *Fwd: + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: // Nothing to do here - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - case *Pointer, *Array: depth++ localType.walk(&localTs) @@ -831,7 +937,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { return nil } - if essentialName(a) == essentialName(b) { + if newEssentialName(a) == newEssentialName(b) { return nil } @@ -849,7 +955,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { } switch lv := localType.(type) { - case *Array, *Pointer, *Float: + case *Array, *Pointer, *Float, *Int: return nil case *Enum: @@ -860,29 +966,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { tv := targetType.(*Fwd) return doNamesMatch(lv.Name, tv.Name) - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - return nil - default: return fmt.Errorf("type %s: %w", localType, ErrNotSupported) } } - -func skipQualifierAndTypedef(typ Type) (Type, error) { - result := typ - for depth := 0; depth <= maxTypeDepth; depth++ { - switch v := (result).(type) { - case qualifier: - result = v.qualify() - case *Typedef: - result = v.Type - default: - return result, nil - } - } - return nil, errors.New("exceeded type depth") -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/doc.go similarity index 71% rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/doc.go rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/doc.go index ad2576cb23c4..b1f4b1fc3eb5 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/doc.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/doc.go @@ -2,7 +2,4 @@ // // The canonical documentation lives in the Linux kernel repository and is // available at https://www.kernel.org/doc/html/latest/bpf/btf.html -// -// The API is very much unstable. You should only use this via the main -// ebpf library. 
package btf diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/ext_info.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/ext_info.go new file mode 100644 index 000000000000..2c0e1afe299a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -0,0 +1,721 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// ExtInfos contains ELF section metadata. +type ExtInfos struct { + // The slices are sorted by offset in ascending order. + funcInfos map[string][]funcInfo + lineInfos map[string][]lineInfo + relocationInfos map[string][]coreRelocationInfo +} + +// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. +// +// Returns an error wrapping ErrNotFound if no ext infos are present. +func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. + headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string][]funcInfo, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncInfos(bfis, ts) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string][]lineInfo, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, 
ts, strings) + if err != nil { + return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type funcInfoMeta struct{} +type coreRelocationMeta struct{} + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn) + funcInfos = funcInfos[1:] + } + + if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos[0].line) + lineInfos = lineInfos[1:] + } + + if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo) + reloInfos = reloInfos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + var fiBuf, liBuf bytes.Buffer + for iter.Next() { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &funcInfo{ + fn: fn, + offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, typeID); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if line, ok := iter.Ins.Source().(*Line); ok { + li := &lineInfo{ + line: line, + offset: iter.Offset, + } + if err := li.marshal(&liBuf); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + } + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. +func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { + var header btfExtHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") + } + + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. +func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. +func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. 
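+// Like FuncInfoOff and LineInfoOff, COREReloOff is measured from the end of
+// the header (HdrLen).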
+func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
+	return int64(h.HdrLen + ch.COREReloOff)
+}
+
+// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
+// field is larger than its size.
+type btfExtCOREHeader struct {
+	COREReloOff uint32
+	COREReloLen uint32
+}
+
+// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
+// header bytes are present, extHeader.HdrLen will be larger than the struct,
+// indicating the presence of a CO-RE extension header.
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
+	extHdrSize := int64(binary.Size(extHeader))
+	remainder := int64(extHeader.HdrLen) - extHdrSize
+
+	if remainder == 0 {
+		return nil, nil
+	}
+
+	var coreHeader btfExtCOREHeader
+	if err := binary.Read(r, bo, &coreHeader); err != nil {
+		return nil, fmt.Errorf("can't read header: %v", err)
+	}
+
+	return &coreHeader, nil
+}
+
+type btfExtInfoSec struct {
+	SecNameOff uint32
+	NumInfo    uint32
+}
+
+// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
+// appearing within func_info and line_info sub-sections.
+// These headers appear once for each program section in the ELF and are
+// followed by one or more func/line_info records for the section.
+func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
+	var infoHeader btfExtInfoSec
+	if err := binary.Read(r, bo, &infoHeader); err != nil {
+		return "", nil, fmt.Errorf("read ext info header: %w", err)
+	}
+
+	secName, err := strings.Lookup(infoHeader.SecNameOff)
+	if err != nil {
+		return "", nil, fmt.Errorf("get section name: %w", err)
+	}
+	if secName == "" {
+		return "", nil, fmt.Errorf("extinfo header refers to empty section name")
+	}
+
+	if infoHeader.NumInfo == 0 {
+		return "", nil, fmt.Errorf("section %s has zero records", secName)
+	}
+
+	return secName, &infoHeader, nil
+}
+
+// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
+// or line_infos segment that describes the length of all extInfoRecords in
+// that segment.
+func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
+	const maxRecordSize = 256
+
+	var recordSize uint32
+	if err := binary.Read(r, bo, &recordSize); err != nil {
+		return 0, fmt.Errorf("can't read record size: %v", err)
+	}
+
+	if recordSize < 4 {
+		// Need at least InsnOff worth of bytes per record.
+		return 0, errors.New("record size too short")
+	}
+	if recordSize > maxRecordSize {
+		return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+	}
+
+	return recordSize, nil
+}
+
+// The size of a FuncInfo in BTF wire format.
+var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))
+
+type funcInfo struct {
+	fn     *Func
+	offset asm.RawInstructionOffset
+}
+
+type bpfFuncInfo struct {
+	// Instruction offset of the function within an ELF section.
+	InsnOff uint32
+	TypeID  TypeID
+}
+
+func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
+	typ, err := ts.ByID(fi.TypeID)
+	if err != nil {
+		return nil, err
+	}
+
+	fn, ok := typ.(*Func)
+	if !ok {
+		return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+	}
+
+	// C doesn't have anonymous functions, but check just in case.
+	if fn.Name == "" {
+		return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+	}
+
+	return &funcInfo{
+		fn,
+		asm.RawInstructionOffset(fi.InsnOff),
+	}, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
+	fis := make([]funcInfo, 0, len(bfis))
+	for _, bfi := range bfis {
+		fi, err := newFuncInfo(bfi, ts)
+		if err != nil {
+			return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+		}
+		fis = append(fis, *fi)
+	}
+	sort.Slice(fis, func(i, j int) bool {
+		return fis[i].offset < fis[j].offset
+	})
+	return fis, nil
+}
+
+// marshal into the BTF wire format.
+func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
+	id, err := typeID(fi.fn)
+	if err != nil {
+		return err
+	}
+	bfi := bpfFuncInfo{
+		InsnOff: uint32(fi.offset),
+		TypeID:  id,
+	}
+	return binary.Write(w, internal.NativeEndian, &bfi)
+}
+
+// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
+// func infos indexed by section name.
+func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
+	recordSize, err := parseExtInfoRecordSize(r, bo)
+	if err != nil {
+		return nil, err
+	}
+
+	result := make(map[string][]bpfFuncInfo)
+	for {
+		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+		if errors.Is(err, io.EOF) {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
+		if err != nil {
+			return nil, fmt.Errorf("section %v: %w", secName, err)
+		}
+
+		result[secName] = records
+	}
+}
+
+// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
+// These records appear after a btf_ext_info_sec header in the func_info
+// sub-section of .BTF.ext.
+func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) {
+	var out []bpfFuncInfo
+	var fi bpfFuncInfo
+
+	if exp, got := FuncInfoSize, recordSize; exp != got {
+		// BTF blob's record size is longer than we know how to parse.
+		return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
+	}
+
+	for i := uint32(0); i < recordNum; i++ {
+		if err := binary.Read(r, bo, &fi); err != nil {
+			return nil, fmt.Errorf("can't read function info: %v", err)
+		}
+
+		if fi.InsnOff%asm.InstructionSize != 0 {
+			return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
+		}
+
+		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+		// Convert as early as possible.
+		fi.InsnOff /= asm.InstructionSize
+
+		out = append(out, fi)
+	}
+
+	return out, nil
+}
+
+var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))
+
+// Line represents the location and contents of a single line of source
+// code a BPF ELF was compiled from.
+type Line struct {
+	fileName   string
+	line       string
+	lineNumber uint32
+	lineColumn uint32
+
+	// TODO: We should get rid of the fields below, but for that we need to be
+	// able to write BTF.
+
+	fileNameOff uint32
+	lineOff     uint32
+}
+
+func (li *Line) FileName() string {
+	return li.fileName
+}
+
+func (li *Line) Line() string {
+	return li.line
+}
+
+func (li *Line) LineNumber() uint32 {
+	return li.lineNumber
+}
+
+func (li *Line) LineColumn() uint32 {
+	return li.lineColumn
+}
+
+func (li *Line) String() string {
+	return li.line
+}
+
+type lineInfo struct {
+	line   *Line
+	offset asm.RawInstructionOffset
+}
+
+// Constants for the format of bpfLineInfo.LineCol.
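+// LineCol packs the line number into the upper 22 bits and the column into
+// the lower 10: line = LineCol >> bpfLineShift, column = LineCol & bpfColumnMax.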
+const (
+	bpfLineShift = 10
+	bpfLineMax   = (1 << (32 - bpfLineShift)) - 1
+	bpfColumnMax = (1 << bpfLineShift) - 1
+)
+
+type bpfLineInfo struct {
+	// Instruction offset of the line within the whole instruction stream, in instructions.
+	InsnOff     uint32
+	FileNameOff uint32
+	LineOff     uint32
+	LineCol     uint32
+}
+
+func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
+	line, err := strings.Lookup(li.LineOff)
+	if err != nil {
+		return nil, fmt.Errorf("lookup of line: %w", err)
+	}
+
+	fileName, err := strings.Lookup(li.FileNameOff)
+	if err != nil {
+		return nil, fmt.Errorf("lookup of filename: %w", err)
+	}
+
+	lineNumber := li.LineCol >> bpfLineShift
+	lineColumn := li.LineCol & bpfColumnMax
+
+	return &lineInfo{
+		&Line{
+			fileName,
+			line,
+			lineNumber,
+			lineColumn,
+			li.FileNameOff,
+			li.LineOff,
+		},
+		asm.RawInstructionOffset(li.InsnOff),
+	}, nil
+}
+
+func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) {
+	lis := make([]lineInfo, 0, len(blis))
+	for _, bli := range blis {
+		li, err := newLineInfo(bli, strings)
+		if err != nil {
+			return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
+		}
+		lis = append(lis, *li)
+	}
+	sort.Slice(lis, func(i, j int) bool {
+		return lis[i].offset < lis[j].offset
+	})
+	return lis, nil
+}
+
+// marshal writes the binary representation of the LineInfo to w.
+func (li *lineInfo) marshal(w io.Writer) error {
+	line := li.line
+	if line.lineNumber > bpfLineMax {
+		return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
+	}
+
+	if line.lineColumn > bpfColumnMax {
+		return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
+	}
+
+	bli := bpfLineInfo{
+		uint32(li.offset),
+		line.fileNameOff,
+		line.lineOff,
+		(line.lineNumber << bpfLineShift) | line.lineColumn,
+	}
+	return binary.Write(w, internal.NativeEndian, &bli)
+}
+
+// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
+// line infos indexed by section name.
+func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
+	recordSize, err := parseExtInfoRecordSize(r, bo)
+	if err != nil {
+		return nil, err
+	}
+
+	result := make(map[string][]bpfLineInfo)
+	for {
+		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+		if errors.Is(err, io.EOF) {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
+		if err != nil {
+			return nil, fmt.Errorf("section %v: %w", secName, err)
+		}
+
+		result[secName] = records
+	}
+}
+
+// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
+// These records appear after a btf_ext_info_sec header in the line_info
+// sub-section of .BTF.ext.
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) {
+	var out []bpfLineInfo
+	var li bpfLineInfo
+
+	if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
+		// BTF blob's record size is longer than we know how to parse.
+		return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
+	}
+
+	for i := uint32(0); i < recordNum; i++ {
+		if err := binary.Read(r, bo, &li); err != nil {
+			return nil, fmt.Errorf("can't read line info: %v", err)
+		}
+
+		if li.InsnOff%asm.InstructionSize != 0 {
+			return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
+		}
+
+		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+		// Convert as early as possible.
+		li.InsnOff /= asm.InstructionSize
+
+		out = append(out, li)
+	}
+
+	return out, nil
+}
+
+// bpfCORERelo matches the kernel's struct bpf_core_relo.
+type bpfCORERelo struct {
+	InsnOff      uint32
+	TypeID       TypeID
+	AccessStrOff uint32
+	Kind         coreKind
+}
+
+type CORERelocation struct {
+	typ      Type
+	accessor coreAccessor
+	kind     coreKind
+}
+
+func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
+	relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
+	return relo
+}
+
+type coreRelocationInfo struct {
+	relo   *CORERelocation
+	offset asm.RawInstructionOffset
+}
+
+func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
+	typ, err := ts.ByID(relo.TypeID)
+	if err != nil {
+		return nil, err
+	}
+
+	accessorStr, err := strings.Lookup(relo.AccessStrOff)
+	if err != nil {
+		return nil, err
+	}
+
+	accessor, err := parseCOREAccessor(accessorStr)
+	if err != nil {
+		return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+	}
+
+	return &coreRelocationInfo{
+		&CORERelocation{
+			typ,
+			accessor,
+			relo.Kind,
+		},
+		asm.RawInstructionOffset(relo.InsnOff),
+	}, nil
+}
+
+func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
+	rs := make([]coreRelocationInfo, 0, len(brs))
+	for _, br := range brs {
+		relo, err := newRelocationInfo(br, ts, strings)
+		if err != nil {
+			return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
+		}
+		rs = append(rs, *relo)
+	}
+	sort.Slice(rs, func(i, j int) bool {
+		return rs[i].offset < rs[j].offset
+	})
+	return rs, nil
+}
+
+var extInfoReloSize = binary.Size(bpfCORERelo{})
+
+// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
+// CO-RE relocations indexed by section name.
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
+	recordSize, err := parseExtInfoRecordSize(r, bo)
+	if err != nil {
+		return nil, err
+	}
+
+	if recordSize != uint32(extInfoReloSize) {
+		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+	}
+
+	result := make(map[string][]bpfCORERelo)
+	for {
+		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+		if errors.Is(err, io.EOF) {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
+		if err != nil {
+			return nil, fmt.Errorf("section %v: %w", secName, err)
+		}
+
+		result[secName] = records
+	}
+}
+
+// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
+// coreRelos. These records appear after a btf_ext_info_sec header in the
+// core_relos sub-section of .BTF.ext.
+func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
+	var out []bpfCORERelo
+
+	var relo bpfCORERelo
+	for i := uint32(0); i < recordNum; i++ {
+		if err := binary.Read(r, bo, &relo); err != nil {
+			return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
+		}
+
+		if relo.InsnOff%asm.InstructionSize != 0 {
+			return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
+		}
+
+		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+		// Convert as early as possible.
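+		// A raw BPF instruction is asm.InstructionSize (8) bytes wide.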
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/format.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/format.go new file mode 100644 index 000000000000..e7688a2a6e83 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/format.go @@ -0,0 +1,319 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. +// +// A zero GoFormatter is valid to use. +type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. +// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + switch v := skipQualifiers(typ).(type) { + case *Enum: + fmt.Fprintf(&gf.w, "type %s ", name) + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + return fmt.Errorf("%s: invalid enum size %d", typ, v.Size) + } + + if len(v.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range v.Values { + id := gf.enumIdentifier(name, ev.Name) + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value) + } + gf.w.WriteString(")") + + return nil + + default: + fmt.Fprintf(&gf.w, "type %s ", name) + return gf.writeTypeLit(v, 0) + } +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ = skipQualifiers(typ) + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. 
+// +// struct { bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxTypeDepth { + return errNestedTooDeep + } + + var err error + switch v := skipQualifiers(typ).(type) { + case *Int: + gf.writeIntLit(v) + + case *Enum: + gf.w.WriteString("int32") + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. + err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + default: + return fmt.Errorf("type %T: %w", v, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) { + // NB: Encoding.IsChar is ignored. + if i.Encoding.IsBool() && i.Size == 1 { + gf.w.WriteString("bool") + return + } + + bits := i.Size * 8 + if i.Encoding.IsSigned() { + fmt.Fprintf(&gf.w, "int%d", bits) + } else { + fmt.Fprintf(&gf.w, "uint%d", bits) + } +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.Offset.Bytes() + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + size, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + prevOffset = offset + uint32(size) + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. + union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxTypeDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v := vsi.Type.(*Var) + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. 
+			continue
+		}
+
+		if v.Name == "" {
+			return fmt.Errorf("variable %d: empty name", i)
+		}
+
+		gf.writePadding(vsi.Offset - prevOffset)
+		prevOffset = vsi.Offset + vsi.Size
+
+		fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))
+
+		if err := gf.writeType(v.Type, depth); err != nil {
+			return fmt.Errorf("variable %d: %w", i, err)
+		}
+
+		gf.w.WriteString("; ")
+	}
+
+	gf.writePadding(ds.Size - prevOffset)
+	gf.w.WriteString("}")
+	return nil
+}
+
+func (gf *GoFormatter) writePadding(bytes uint32) {
+	if bytes > 0 {
+		fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
+	}
+}
+
+func skipQualifiers(typ Type) Type {
+	result := typ
+	for depth := 0; depth <= maxTypeDepth; depth++ {
+		switch v := (result).(type) {
+		case qualifier:
+			result = v.qualify()
+		default:
+			return result
+		}
+	}
+	return &cycle{typ}
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/handle.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/handle.go
new file mode 100644
index 000000000000..128e9b35cf3b
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -0,0 +1,121 @@
+package btf
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// HandleInfo describes a Handle.
+type HandleInfo struct {
+	// ID of this handle in the kernel. The ID is only valid as long as the
+	// associated handle is kept alive.
+	ID ID
+
+	// Name is an identifying name for the BTF, currently only used by the
+	// kernel.
+	Name string
+
+	// IsKernel is true if the BTF originated with the kernel and not
+	// userspace.
+	IsKernel bool
+
+	// Size of the raw BTF in bytes.
+	size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+	// We invoke the syscall once with empty BTF and name buffers to get size
+	// information to allocate buffers. Then we invoke it a second time with
+	// buffers to receive the data.
+	var btfInfo sys.BtfInfo
+	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+		return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+	}
+
+	if btfInfo.NameLen > 0 {
+		// NameLen doesn't account for the terminating NUL.
+		btfInfo.NameLen++
+	}
+
+	// Don't pull raw BTF by default, since it may be quite large.
+	btfSize := btfInfo.BtfSize
+	btfInfo.BtfSize = 0
+
+	nameBuffer := make([]byte, btfInfo.NameLen)
+	btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+		return nil, err
+	}
+
+	return &HandleInfo{
+		ID:       ID(btfInfo.Id),
+		Name:     unix.ByteSliceToString(nameBuffer),
+		IsKernel: btfInfo.KernelBtf != 0,
+		size:     btfSize,
+	}, nil
+}
+
+// IsVmlinux returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+	return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
+func (i *HandleInfo) IsModule() bool {
+	return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+	// The ID of the last retrieved handle. Only valid after a call to Next.
+	ID  ID
+	err error
+}
+
+// Next retrieves a handle for the next BTF blob.
+//
+// [Handle.Close] is called if *handle is non-nil to avoid leaking fds.
+//
+// Returns true if another BTF blob was found. Call [HandleIterator.Err] after
+// the function returns false.
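+//
+// A typical iteration looks like:
+//
+//	var handle *Handle
+//	it := new(HandleIterator)
+//	for it.Next(&handle) {
+//		fmt.Println(handle)
+//	}
+//	if err := it.Err(); err != nil {
+//		// inspect the error
+//	}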
+func (it *HandleIterator) Next(handle **Handle) bool { + if *handle != nil { + (*handle).Close() + *handle = nil + } + + id := it.ID + for { + attr := &sys.BtfGetNextIdAttr{Id: id} + err := sys.BtfGetNextId(attr) + if errors.Is(err, os.ErrNotExist) { + // There are no more BTF objects. + return false + } else if err != nil { + it.err = fmt.Errorf("get next BTF ID: %w", err) + return false + } + + id = attr.NextId + *handle, err = NewHandleFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Try again with the next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err) + return false + } + + it.ID = id + return true + } +} + +// Err returns an error if iteration failed for some reason. +func (it *HandleIterator) Err() error { + return it.err +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/strings.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 000000000000..67626e0dd172 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,128 @@ +package btf + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" +) + +type stringTable struct { + base *stringTable + offsets []uint32 + strings []string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. +type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. + firstStringOffset := uint32(0) + if base != nil { + idx := len(base.offsets) - 1 + firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 + } + + // Derived from vmlinux BTF. 
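+ // As a rough worked example with illustrative numbers: for a 64 KiB
+ // string table, r.Size()/averageStringLength pre-allocates room for
+ // 65536/16 = 4096 offsets and strings, trading a little memory for
+ // fewer slice growths while scanning below.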
+ const averageStringLength = 16 + + n := int(r.Size() / averageStringLength) + offsets := make([]uint32, 0, n) + strings := make([]string, 0, n) + + offset := firstStringOffset + scanner := bufio.NewScanner(r) + scanner.Split(splitNull) + for scanner.Scan() { + str := scanner.Text() + offsets = append(offsets, offset) + strings = append(strings, str) + offset += uint32(len(str)) + 1 + } + if err := scanner.Err(); err != nil { + return nil, err + } + + if len(strings) == 0 { + return nil, errors.New("string table is empty") + } + + if firstStringOffset == 0 && strings[0] != "" { + return nil, errors.New("first item in string table is non-empty") + } + + return &stringTable{base, offsets, strings}, nil +} + +func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { + i := bytes.IndexByte(data, 0) + if i == -1 { + if atEOF && len(data) > 0 { + return 0, nil, errors.New("string table isn't null terminated") + } + return 0, nil, nil + } + + return i + 1, data[:i], nil +} + +func (st *stringTable) Lookup(offset uint32) (string, error) { + if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] { + return st.base.lookup(offset) + } + return st.lookup(offset) +} + +func (st *stringTable) lookup(offset uint32) (string, error) { + i := search(st.offsets, offset) + if i == len(st.offsets) || st.offsets[i] != offset { + return "", fmt.Errorf("offset %d isn't start of a string", offset) + } + + return st.strings[i], nil +} + +func (st *stringTable) Length() int { + last := len(st.offsets) - 1 + return int(st.offsets[last]) + len(st.strings[last]) + 1 +} + +func (st *stringTable) Marshal(w io.Writer) error { + for _, str := range st.strings { + _, err := io.WriteString(w, str) + if err != nil { + return err + } + _, err = w.Write([]byte{0}) + if err != nil { + return err + } + } + return nil +} + +// search is a copy of sort.Search specialised for uint32. +// +// Licensed under https://go.dev/LICENSE +func search(ints []uint32, needle uint32) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, len(ints) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !(ints[h] >= needle) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/types.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 000000000000..402a363c28ac --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,1212 @@ +package btf + +import ( + "fmt" + "io" + "math" + "reflect" + "strings" + + "github.com/cilium/ebpf/asm" +) + +const maxTypeDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID uint32 + +// Type represents a type described by BTF. +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. + // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. %2v would output details of the root type and its children + // as well as a short description of the grandchildren. 
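+ //
+ // For example (an illustrative sketch; the exact output is not part
+ // of the API contract):
+ //
+ //	fmt.Sprintf("%s", t)  // identity only, e.g. Struct:"foo"
+ //	fmt.Sprintf("%1v", t) // identity plus one level of detail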
+ fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // Enumerate all nested Types. Repeated calls must visit nested + // types in the same order. + walk(*typeDeque) +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) +) + +// types is a list of Type. +// +// The order determines the ID of a type. +type types []Type + +func (ts types) ByID(id TypeID) (Type, error) { + if int(id) > len(ts) { + return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound) + } + return ts[id], nil +} + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } +func (v *Void) walk(*typeDeque) {} + +type IntEncoding byte + +const ( + Signed IntEncoding = 1 << iota + Char + Bool +) + +func (ie IntEncoding) IsSigned() bool { + return ie&Signed != 0 +} + +func (ie IntEncoding) IsChar() bool { + return ie&Char != 0 +} + +func (ie IntEncoding) IsBool() bool { + return ie&Bool != 0 +} + +func (ie IntEncoding) String() string { + switch { + case ie.IsChar() && ie.IsSigned(): + return "char" + case ie.IsChar() && !ie.IsSigned(): + return "uchar" + case ie.IsBool(): + return "bool" + case ie.IsSigned(): + return "signed" + default: + return "unsigned" + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size*8) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) walk(*typeDeque) {} +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. +type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) walk(tdq *typeDeque) { + tdq.push(&arr.Index) + tdq.push(&arr.Type) +} + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. 
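+//
+// As an illustrative sketch, the C declaration
+//
+//	struct point { int x; int y; };
+//
+// corresponds to
+//
+//	&Struct{Name: "point", Size: 8, Members: []Member{
+//		{Name: "x", Type: &Int{Size: 4, Encoding: Signed}, Offset: 0},
+//		{Name: "y", Type: &Int{Size: 4, Encoding: Signed}, Offset: 32},
+//	}}
+//
+// with Member.Offset counted in bits.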
+type Struct struct {
+ Name string
+ // The size of the struct including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (s *Struct) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, s, "fields=", len(s.Members))
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) walk(tdq *typeDeque) {
+ for i := range s.Members {
+ tdq.push(&s.Members[i].Type)
+ }
+}
+
+func (s *Struct) copy() Type {
+ cpy := *s
+ cpy.Members = copyMembers(s.Members)
+ return &cpy
+}
+
+func (s *Struct) members() []Member {
+ return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+ Name string
+ // The size of the union including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (u *Union) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, u, "fields=", len(u.Members))
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) walk(tdq *typeDeque) {
+ for i := range u.Members {
+ tdq.push(&u.Members[i].Type)
+ }
+}
+
+func (u *Union) copy() Type {
+ cpy := *u
+ cpy.Members = copyMembers(u.Members)
+ return &cpy
+}
+
+func (u *Union) members() []Member {
+ return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+ cpy := make([]Member, len(orig))
+ copy(cpy, orig)
+ return cpy
+}
+
+type composite interface {
+ members() []Member
+}
+
+var (
+ _ composite = (*Struct)(nil)
+ _ composite = (*Union)(nil)
+)
+
+// A value in bits.
+type Bits uint32
+
+// Bytes converts a bit value into bytes.
+func (b Bits) Bytes() uint32 {
+ return uint32(b / 8)
+}
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+ Name string
+ Type Type
+ Offset Bits
+ BitfieldSize Bits
+}
+
+// Enum lists possible values.
+type Enum struct {
+ Name string
+ // Size of the enum value in bytes.
+ Size uint32
+ Values []EnumValue
+}
+
+func (e *Enum) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum.
+//
+// It is not a valid Type.
+type EnumValue struct {
+ Name string
+ Value int32
+}
+
+func (e *Enum) size() uint32 { return e.Size }
+func (e *Enum) walk(*typeDeque) {}
+func (e *Enum) copy() Type {
+ cpy := *e
+ cpy.Values = make([]EnumValue, len(e.Values))
+ copy(cpy.Values, e.Values)
+ return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+ FwdStruct FwdKind = iota
+ FwdUnion
+)
+
+func (fk FwdKind) String() string {
+ switch fk {
+ case FwdStruct:
+ return "struct"
+ case FwdUnion:
+ return "union"
+ default:
+ return fmt.Sprintf("%T(%d)", fk, int(fk))
+ }
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+ Name string
+ Kind FwdKind
+}
+
+func (f *Fwd) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Kind)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) walk(*typeDeque) {}
+func (f *Fwd) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// Typedef is an alias of a Type.
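+//
+// For example, the C typedef
+//
+//	typedef int pid_t;
+//
+// can be modelled (sketch) as &Typedef{Name: "pid_t", Type: &Int{Size: 4,
+// Encoding: Signed}}, and UnderlyingType resolves through it to the *Int.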
+type Typedef struct { + Name string + Type Type +} + +func (td *Typedef) Format(fs fmt.State, verb rune) { + formatType(fs, verb, td, td.Type) +} + +func (td *Typedef) TypeName() string { return td.Name } + +func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } +func (td *Typedef) copy() Type { + cpy := *td + return &cpy +} + +// Volatile is a qualifier. +type Volatile struct { + Type Type +} + +func (v *Volatile) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Type) +} + +func (v *Volatile) TypeName() string { return "" } + +func (v *Volatile) qualify() Type { return v.Type } +func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } +func (v *Volatile) copy() Type { + cpy := *v + return &cpy +} + +// Const is a qualifier. +type Const struct { + Type Type +} + +func (c *Const) Format(fs fmt.State, verb rune) { + formatType(fs, verb, c, c.Type) +} + +func (c *Const) TypeName() string { return "" } + +func (c *Const) qualify() Type { return c.Type } +func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } +func (c *Const) copy() Type { + cpy := *c + return &cpy +} + +// Restrict is a qualifier. +type Restrict struct { + Type Type +} + +func (r *Restrict) Format(fs fmt.State, verb rune) { + formatType(fs, verb, r, r.Type) +} + +func (r *Restrict) TypeName() string { return "" } + +func (r *Restrict) qualify() Type { return r.Type } +func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } +func (r *Restrict) copy() Type { + cpy := *r + return &cpy +} + +// Func is a function definition. +type Func struct { + Name string + Type Type + Linkage FuncLinkage +} + +func FuncMetadata(ins *asm.Instruction) *Func { + fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) + return fn +} + +func (f *Func) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Linkage, "proto=", f.Type) +} + +func (f *Func) TypeName() string { return f.Name } + +func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } +func (f *Func) copy() Type { + cpy := *f + return &cpy +} + +// FuncProto is a function declaration. +type FuncProto struct { + Return Type + Params []FuncParam +} + +func (fp *FuncProto) Format(fs fmt.State, verb rune) { + formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return) +} + +func (fp *FuncProto) TypeName() string { return "" } + +func (fp *FuncProto) walk(tdq *typeDeque) { + tdq.push(&fp.Return) + for i := range fp.Params { + tdq.push(&fp.Params[i].Type) + } +} + +func (fp *FuncProto) copy() Type { + cpy := *fp + cpy.Params = make([]FuncParam, len(fp.Params)) + copy(cpy.Params, fp.Params) + return &cpy +} + +type FuncParam struct { + Name string + Type Type +} + +// Var is a global variable. +type Var struct { + Name string + Type Type + Linkage VarLinkage +} + +func (v *Var) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Linkage) +} + +func (v *Var) TypeName() string { return v.Name } + +func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } +func (v *Var) copy() Type { + cpy := *v + return &cpy +} + +// Datasec is a global program section containing data. 
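+//
+// An illustrative sketch: an ELF .rodata section holding a single 4-byte
+// global could be described as
+//
+//	&Datasec{Name: ".rodata", Size: 4, Vars: []VarSecinfo{
+//		{Type: &Var{Name: "max_entries" /* ... */}, Offset: 0, Size: 4},
+//	}}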
+type Datasec struct {
+ Name string
+ Size uint32
+ Vars []VarSecinfo
+}
+
+func (ds *Datasec) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, ds)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) walk(tdq *typeDeque) {
+ for i := range ds.Vars {
+ tdq.push(&ds.Vars[i].Type)
+ }
+}
+
+func (ds *Datasec) copy() Type {
+ cpy := *ds
+ cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+ copy(cpy.Vars, ds.Vars)
+ return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+ Type Type
+ Offset uint32
+ Size uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+ Name string
+
+ // The size of the float in bytes.
+ Size uint32
+}
+
+func (f *Float) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, "size=", f.Size*8)
+}
+
+func (f *Float) TypeName() string { return f.Name }
+func (f *Float) size() uint32 { return f.Size }
+func (f *Float) walk(*typeDeque) {}
+func (f *Float) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// cycle is a type which had to be elided since it exceeded maxTypeDepth.
+type cycle struct {
+ root Type
+}
+
+func (c *cycle) ID() TypeID { return math.MaxUint32 }
+func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
+func (c *cycle) TypeName() string { return "" }
+func (c *cycle) walk(*typeDeque) {}
+func (c *cycle) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+type sizer interface {
+ size() uint32
+}
+
+var (
+ _ sizer = (*Int)(nil)
+ _ sizer = (*Pointer)(nil)
+ _ sizer = (*Struct)(nil)
+ _ sizer = (*Union)(nil)
+ _ sizer = (*Enum)(nil)
+ _ sizer = (*Datasec)(nil)
+)
+
+type qualifier interface {
+ qualify() Type
+}
+
+var (
+ _ qualifier = (*Const)(nil)
+ _ qualifier = (*Restrict)(nil)
+ _ qualifier = (*Volatile)(nil)
+)
+
+// Sizeof returns the size of a type in bytes.
+//
+// Returns an error if the size can't be computed.
+func Sizeof(typ Type) (int, error) {
+ var (
+ n = int64(1)
+ elem int64
+ )
+
+ for i := 0; i < maxTypeDepth; i++ {
+ switch v := typ.(type) {
+ case *Array:
+ if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ // Arrays may be of zero length, which allows
+ // n to be zero as well.
+ n *= int64(v.Nelems)
+ typ = v.Type
+ continue
+
+ case sizer:
+ elem = int64(v.size())
+
+ case *Typedef:
+ typ = v.Type
+ continue
+
+ case qualifier:
+ typ = v.qualify()
+ continue
+
+ default:
+ return 0, fmt.Errorf("unsized type %T", typ)
+ }
+
+ if n > 0 && elem > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ size := n * elem
+ if int64(int(size)) != size {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ return int(size), nil
+ }
+
+ return 0, fmt.Errorf("type %s: exceeded type depth", typ)
+}
+
+// alignof returns the alignment of a type.
+//
+// Currently only supports the subset of types necessary for bitfield relocations.
+func alignof(typ Type) (int, error) {
+ switch t := UnderlyingType(typ).(type) {
+ case *Enum:
+ return int(t.size()), nil
+ case *Int:
+ return int(t.Size), nil
+ default:
+ return 0, fmt.Errorf("can't calculate alignment of %T", t)
+ }
+}
+
+// Transformer modifies a given Type and returns the result.
+//
+// For example, UnderlyingType removes any qualifiers or typedefs from a type.
+// See the example on Copy for how to use a transform.
+type Transformer func(Type) Type
+
+// Copy a Type recursively.
+// +// typ may form a cycle. If transform is not nil, it is called with the +// to be copied type, and the returned value is copied instead. +func Copy(typ Type, transform Transformer) Type { + copies := make(copier) + copies.copy(&typ, transform) + return typ +} + +// copy a slice of Types recursively. +// +// See Copy for the semantics. +func copyTypes(types []Type, transform Transformer) []Type { + result := make([]Type, len(types)) + copy(result, types) + + copies := make(copier) + for i := range result { + copies.copy(&result[i], transform) + } + + return result +} + +type copier map[Type]Type + +func (c copier) copy(typ *Type, transform Transformer) { + var work typeDeque + for t := typ; t != nil; t = work.pop() { + // *t is the identity of the type. + if cpy := c[*t]; cpy != nil { + *t = cpy + continue + } + + var cpy Type + if transform != nil { + cpy = transform(*t).copy() + } else { + cpy = (*t).copy() + } + + c[*t] = cpy + *t = cpy + + // Mark any nested types for copying. + cpy.walk(&work) + } +} + +// typeDeque keeps track of pointers to types which still +// need to be visited. +type typeDeque struct { + types []*Type + read, write uint64 + mask uint64 +} + +func (dq *typeDeque) empty() bool { + return dq.read == dq.write +} + +// push adds a type to the stack. +func (dq *typeDeque) push(t *Type) { + if dq.write-dq.read < uint64(len(dq.types)) { + dq.types[dq.write&dq.mask] = t + dq.write++ + return + } + + new := len(dq.types) * 2 + if new == 0 { + new = 8 + } + + types := make([]*Type, new) + pivot := dq.read & dq.mask + n := copy(types, dq.types[pivot:]) + n += copy(types[n:], dq.types[:pivot]) + types[n] = t + + dq.types = types + dq.mask = uint64(new) - 1 + dq.read, dq.write = 0, uint64(n+1) +} + +// shift returns the first element or null. +func (dq *typeDeque) shift() *Type { + if dq.empty() { + return nil + } + + index := dq.read & dq.mask + t := dq.types[index] + dq.types[index] = nil + dq.read++ + return t +} + +// pop returns the last element or null. +func (dq *typeDeque) pop() *Type { + if dq.empty() { + return nil + } + + dq.write-- + index := dq.write & dq.mask + t := dq.types[index] + dq.types[index] = nil + return t +} + +// all returns all elements. +// +// The deque is empty after calling this method. +func (dq *typeDeque) all() []*Type { + length := dq.write - dq.read + types := make([]*Type, 0, length) + for t := dq.shift(); t != nil; t = dq.shift() { + types = append(types, t) + } + return types +} + +// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns +// it into a graph of Types connected via pointers. +// +// If baseTypes are provided, then the raw types are +// considered to be of a split BTF (e.g., a kernel module). +// +// Returns a slice of types indexed by TypeID. Since BTF ignores compilation +// units, multiple types may share the same name. A Type may form a cyclic graph +// by pointing at itself. +func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) { + types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types + + typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1) + + if baseTypes == nil { + // Void is defined to always be type ID 0, and is thus omitted from BTF. 
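+ // (Illustrative numbering note: Void occupies ID 0 here, so the
+ // first real type below gets ID 1; for split BTF, a base with 100
+ // types instead makes the first split type start at ID 100.)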
+ types = append(types, (*Void)(nil)) + } else { + // For split BTF, the next ID is max base BTF type ID + 1 + typeIDOffset = TypeID(len(baseTypes)) + } + + type fixupDef struct { + id TypeID + typ *Type + } + + var fixups []fixupDef + fixup := func(id TypeID, typ *Type) { + if id < TypeID(len(baseTypes)) { + *typ = baseTypes[id] + return + } + + idx := id + if baseTypes != nil { + idx = id - TypeID(len(baseTypes)) + } + if idx < TypeID(len(types)) { + // We've already inflated this type, fix it up immediately. + *typ = types[idx] + return + } + fixups = append(fixups, fixupDef{id, typ}) + } + + type assertion struct { + typ *Type + want reflect.Type + } + + var assertions []assertion + assert := func(typ *Type, want reflect.Type) error { + if *typ != nil { + // The type has already been fixed up, check the type immediately. + if reflect.TypeOf(*typ) != want { + return fmt.Errorf("expected %s, got %T", want, *typ) + } + return nil + } + assertions = append(assertions, assertion{typ, want}) + return nil + } + + type bitfieldFixupDef struct { + id TypeID + m *Member + } + + var ( + legacyBitfields = make(map[TypeID][2]Bits) // offset, size + bitfieldFixups []bitfieldFixupDef + ) + convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { + // NB: The fixup below relies on pre-allocating this array to + // work, since otherwise append might re-allocate members. + members := make([]Member, 0, len(raw)) + for i, btfMember := range raw { + name, err := rawStrings.Lookup(btfMember.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(btfMember.Offset), + }) + + m := &members[i] + fixup(raw[i].Type, &m.Type) + + if kindFlag { + m.BitfieldSize = Bits(btfMember.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlat is set + // according to spec. + continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := legacyBitfields[raw[i].Type] + if ok { + // Bingo! + m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + + if m.Type != nil { + // We couldn't find a legacy bitfield, but we know that the member's + // type has already been inflated. Hence we know that it can't be + // a legacy bitfield and there is nothing left to do. + continue + } + + // We don't have fixup data, and the type we're pointing + // at hasn't been inflated yet. No choice but to defer + // the fixup. 
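+ // (Worked example for the new-style encoding above, with
+ // illustrative numbers: a raw member Offset of 0x05000010 with
+ // kindFlag set decodes via >>24 and &0xffffff to a BitfieldSize of
+ // 5 bits at bit offset 0x10.)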
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{ + raw[i].Type, + m, + }) + } + return members, nil + } + + for i, raw := range rawTypes { + var ( + id = typeIDOffset + TypeID(i) + typ Type + ) + + name, err := rawStrings.Lookup(raw.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch raw.Kind() { + case kindInt: + size := raw.Size() + bi := raw.data.(*btfInt) + if bi.Offset() > 0 || bi.Bits().Bytes() != size { + legacyBitfields[id] = [2]Bits{bi.Offset(), bi.Bits()} + } + typ = &Int{name, raw.Size(), bi.Encoding()} + + case kindPointer: + ptr := &Pointer{nil} + fixup(raw.Type(), &ptr.Target) + typ = ptr + + case kindArray: + btfArr := raw.data.(*btfArray) + arr := &Array{nil, nil, btfArr.Nelems} + fixup(btfArr.IndexType, &arr.Index) + fixup(btfArr.Type, &arr.Type) + typ = arr + + case kindStruct: + members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + typ = &Struct{name, raw.Size(), members} + + case kindUnion: + members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + typ = &Union{name, raw.Size(), members} + + case kindEnum: + rawvals := raw.data.([]btfEnum) + vals := make([]EnumValue, 0, len(rawvals)) + for i, btfVal := range rawvals { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + vals = append(vals, EnumValue{ + Name: name, + Value: btfVal.Val, + }) + } + typ = &Enum{name, raw.Size(), vals} + + case kindForward: + if raw.KindFlag() { + typ = &Fwd{name, FwdUnion} + } else { + typ = &Fwd{name, FwdStruct} + } + + case kindTypedef: + typedef := &Typedef{name, nil} + fixup(raw.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + fixup(raw.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + fixup(raw.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + fixup(raw.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, raw.Linkage()} + fixup(raw.Type(), &fn.Type) + if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil { + return nil, err + } + typ = fn + + case kindFuncProto: + rawparams := raw.data.([]btfParam) + params := make([]FuncParam, 0, len(rawparams)) + for i, param := range rawparams { + name, err := rawStrings.Lookup(param.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + params = append(params, FuncParam{ + Name: name, + }) + } + for i := range params { + fixup(rawparams[i].Type, ¶ms[i].Type) + } + + fp := &FuncProto{nil, params} + fixup(raw.Type(), &fp.Return) + typ = fp + + case kindVar: + variable := raw.data.(*btfVariable) + v := &Var{name, nil, VarLinkage(variable.Linkage)} + fixup(raw.Type(), &v.Type) + typ = v + + case kindDatasec: + btfVars := raw.data.([]btfVarSecinfo) + vars := make([]VarSecinfo, 0, len(btfVars)) + for _, btfVar := range btfVars { + vars = append(vars, VarSecinfo{ + Offset: btfVar.Offset, + Size: btfVar.Size, + }) + } + for i := range vars { + fixup(btfVars[i].Type, &vars[i].Type) + if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil { + return nil, err + } + } + typ = &Datasec{name, raw.SizeType, vars} + + case kindFloat: + typ = &Float{name, raw.Size()} + + 
default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) + } + + types = append(types, typ) + } + + for _, fixup := range fixups { + i := int(fixup.id) + if i >= len(types)+len(baseTypes) { + return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) + } + if i < len(baseTypes) { + return nil, fmt.Errorf("fixup for base type id %d is not expected", i) + } + + *fixup.typ = types[i-len(baseTypes)] + } + + for _, bitfieldFixup := range bitfieldFixups { + if bitfieldFixup.id < TypeID(len(baseTypes)) { + return nil, fmt.Errorf("bitfield fixup from split to base types is not expected") + } + + data, ok := legacyBitfields[bitfieldFixup.id] + if ok { + // This is indeed a legacy bitfield, fix it up. + bitfieldFixup.m.Offset += data[0] + bitfieldFixup.m.BitfieldSize = data[1] + } + } + + for _, assertion := range assertions { + if reflect.TypeOf(*assertion.typ) != assertion.want { + return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ) + } + } + + return types, nil +} + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { + if name == "" { + return "" + } + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return essentialName(name[:lastIdx]) + } + return essentialName(name) +} + +// UnderlyingType skips qualifiers and Typedefs. +func UnderlyingType(typ Type) Type { + result := typ + for depth := 0; depth <= maxTypeDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result + } + } + return &cycle{typ} +} + +type formatState struct { + fmt.State + depth int +} + +// formattableType is a subset of Type, to ease unit testing of formatType. +type formattableType interface { + fmt.Formatter + TypeName() string +} + +// formatType formats a type in a canonical form. +// +// Handles cyclical types by only printing cycles up to a certain depth. Elements +// in extra are separated by spaces unless the preceding element is a string +// ending in '='. +func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) { + if verb != 'v' && verb != 's' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb) + return + } + + // This is the same as %T, but elides the package name. Assumes that + // formattableType is implemented by a pointer receiver. + goTypeName := reflect.TypeOf(t).Elem().Name() + _, _ = io.WriteString(f, goTypeName) + + if name := t.TypeName(); name != "" { + // Output BTF type name if present. + fmt.Fprintf(f, ":%q", name) + } + + if f.Flag('+') { + // Output address if requested. + fmt.Fprintf(f, ":%#p", t) + } + + if verb == 's' { + // %s omits details. + return + } + + var depth int + if ps, ok := f.(*formatState); ok { + depth = ps.depth + f = ps.State + } + + maxDepth, ok := f.Width() + if !ok { + maxDepth = 0 + } + + if depth > maxDepth { + // We've reached the maximum depth. This avoids infinite recursion even + // for cyclical types. 
+ return + } + + if len(extra) == 0 { + return + } + + wantSpace := false + _, _ = io.WriteString(f, "[") + for _, arg := range extra { + if wantSpace { + _, _ = io.WriteString(f, " ") + } + + switch v := arg.(type) { + case string: + _, _ = io.WriteString(f, v) + wantSpace = len(v) > 0 && v[len(v)-1] != '=' + continue + + case formattableType: + v.Format(&formatState{f, depth + 1}, verb) + + default: + fmt.Fprint(f, arg) + } + + wantSpace = true + } + _, _ = io.WriteString(f, "]") +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go index 2ededc87a05c..8c2ddc380214 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/collection.go @@ -4,14 +4,11 @@ import ( "encoding/binary" "errors" "fmt" - "io" - "math" "reflect" "strings" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/btf" ) // CollectionOptions control loading a collection into the kernel. @@ -20,6 +17,17 @@ import ( type CollectionOptions struct { Maps MapOptions Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map } // CollectionSpec describes a collection. @@ -27,6 +35,10 @@ type CollectionSpec struct { Maps map[string]*MapSpec Programs map[string]*ProgramSpec + // Types holds type information about Maps and Programs. + // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + // ByteOrder specifies whether the ELF was compiled for // big-endian or little-endian architectures. ByteOrder binary.ByteOrder @@ -42,6 +54,7 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { Maps: make(map[string]*MapSpec, len(cs.Maps)), Programs: make(map[string]*ProgramSpec, len(cs.Programs)), ByteOrder: cs.ByteOrder, + Types: cs.Types, } for name, spec := range cs.Maps { @@ -61,19 +74,21 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { // when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. // // Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { for symbol, m := range maps { // have we seen a program that uses this symbol / map seen := false - fd := m.FD() for progName, progSpec := range cs.Programs { - err := progSpec.Instructions.RewriteMapPtr(symbol, fd) + err := progSpec.Instructions.AssociateMap(symbol, m) switch { case err == nil: seen = true - case asm.IsUnreferencedSymbol(err): + case errors.Is(err, asm.ErrUnreferencedSymbol): // Not all programs need to use the map default: @@ -107,34 +122,67 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { // // Returns an error if a constant doesn't exist. 
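+//
+// An illustrative usage sketch (the constant name is hypothetical and
+// must match a global constant declared in the BPF C code):
+//
+//	err := spec.RewriteConstants(map[string]interface{}{
+//		"max_entries": uint32(128),
+//	})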
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { - rodata := cs.Maps[".rodata"] - if rodata == nil { - return errors.New("missing .rodata section") - } + replaced := make(map[string]bool) - if rodata.BTF == nil { - return errors.New(".rodata section has no BTF") - } + for name, spec := range cs.Maps { + if !strings.HasPrefix(name, ".rodata") { + continue + } - if n := len(rodata.Contents); n != 1 { - return fmt.Errorf("expected one key in .rodata, found %d", n) - } + b, ds, err := spec.dataSection() + if errors.Is(err, errMapNoBTFValue) { + // Data sections without a BTF Datasec are valid, but don't support + // constant replacements. + continue + } + if err != nil { + return fmt.Errorf("map %s: %w", name, err) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + for _, v := range ds.Vars { + vname := v.Type.TypeName() + replacement, ok := consts[vname] + if !ok { + continue + } + + if replaced[vname] { + return fmt.Errorf("section %s: duplicate variable %s", name, vname) + } + + if int(v.Offset+v.Size) > len(cpy) { + return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname) + } - kv := rodata.Contents[0] - value, ok := kv.Value.([]byte) - if !ok { - return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value) + b, err := marshalBytes(replacement, int(v.Size)) + if err != nil { + return fmt.Errorf("marshaling constant replacement %s: %w", vname, err) + } + + copy(cpy[v.Offset:v.Offset+v.Size], b) + + replaced[vname] = true + } + + spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy} } - buf := make([]byte, len(value)) - copy(buf, value) + var missing []string + for c := range consts { + if !replaced[c] { + missing = append(missing, c) + } + } - err := patchValue(buf, rodata.BTF.Value, consts) - if err != nil { - return err + if len(missing) != 0 { + return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ",")) } - rodata.Contents[0] = MapKV{kv.Key, buf} return nil } @@ -187,6 +235,9 @@ func (cs *CollectionSpec) Assign(to interface{}) error { // LoadAndAssign loads Maps and Programs into the kernel and assigns them // to a struct. // +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +// // This function is a shortcut to manually checking the presence // of maps and programs in a CollectionSpec. Consider using bpf2go // if this sounds useful. @@ -209,15 +260,21 @@ func (cs *CollectionSpec) Assign(to interface{}) error { // Returns an error if any of the fields can't be found, or // if the same Map or Program is assigned multiple times. func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { - loader := newCollectionLoader(cs, opts) - defer loader.cleanup() + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err + } + defer loader.close() // Support assigning Programs and Maps, lazy-loading the required objects. 
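+ // An illustrative caller-side sketch (the struct tags are hypothetical
+ // and must name objects present in the spec); the caller owns the
+ // returned objects and must Close() them:
+ //
+ //	var objs struct {
+ //		Prog *Program `ebpf:"xdp_prog"`
+ //		Map  *Map     `ebpf:"xdp_map"`
+ //	}
+ //	err := spec.LoadAndAssign(&objs, nil)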
assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { case reflect.TypeOf((*Program)(nil)): + assignedProgs[name] = true return loader.loadProgram(name) case reflect.TypeOf((*Map)(nil)): @@ -244,15 +301,26 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) switch m.typ { case ProgramArray: // Require all lazy-loaded ProgramArrays to be assigned to the given object. - // Without any references, they will be closed on the first GC and all tail - // calls into them will miss. - if !assignedMaps[n] { + // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. + if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) } } } - loader.finalize() + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } return nil } @@ -264,15 +332,26 @@ type Collection struct { Maps map[string]*Map } -// NewCollection creates a Collection from a specification. +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func NewCollection(spec *CollectionSpec) (*Collection, error) { return NewCollectionWithOptions(spec, CollectionOptions{}) } -// NewCollectionWithOptions creates a Collection from a specification. +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { - loader := newCollectionLoader(spec, &opts) - defer loader.cleanup() + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() // Create maps first, as their fds need to be linked into programs. for mapName := range spec.Maps { @@ -281,7 +360,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co } } - for progName := range spec.Programs { + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + if _, err := loader.loadProgram(progName); err != nil { return nil, err } @@ -293,9 +376,9 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co return nil, err } + // Prevent loader.cleanup from closing maps and programs. 
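+ // Ownership of the underlying fds moves to the returned Collection;
+ // from here on the caller is responsible for Collection.Close().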
maps, progs := loader.maps, loader.programs - - loader.finalize() + loader.maps, loader.programs = nil, nil return &Collection{ progs, @@ -305,13 +388,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co type handleCache struct { btfHandles map[*btf.Spec]*btf.Handle - btfSpecs map[io.ReaderAt]*btf.Spec } func newHandleCache() *handleCache { return &handleCache{ btfHandles: make(map[*btf.Spec]*btf.Handle), - btfSpecs: make(map[io.ReaderAt]*btf.Spec), } } @@ -329,20 +410,6 @@ func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) { return handle, nil } -func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) { - if hc.btfSpecs[rd] != nil { - return hc.btfSpecs[rd], nil - } - - spec, err := btf.LoadSpecFromReader(rd) - if err != nil { - return nil, err - } - - hc.btfSpecs[rd] = spec - return spec, nil -} - func (hc handleCache) close() { for _, handle := range hc.btfHandles { handle.Close() @@ -357,30 +424,34 @@ type collectionLoader struct { handles *handleCache } -func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) *collectionLoader { +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { if opts == nil { opts = &CollectionOptions{} } + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. + for name, m := range opts.MapReplacements { + spec, ok := coll.Maps[name] + if !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) + } + + if err := spec.checkCompatibility(m); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) + } + } + return &collectionLoader{ coll, opts, make(map[string]*Map), make(map[string]*Program), newHandleCache(), - } -} - -// finalize should be called when all the collectionLoader's resources -// have been successfully loaded into the kernel and populated with values. -func (cl *collectionLoader) finalize() { - cl.maps, cl.programs = nil, nil + }, nil } -// cleanup cleans up all resources left over in the collectionLoader. -// Call finalize() when Map and Program creation/population is successful -// to prevent them from getting closed. -func (cl *collectionLoader) cleanup() { +// close all resources left over in the collectionLoader. +func (cl *collectionLoader) close() { cl.handles.close() for _, m := range cl.maps { m.Close() @@ -400,6 +471,21 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { return nil, fmt.Errorf("missing map %s", mapName) } + if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF { + return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName) + } + + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Clone the map to avoid closing user's map later on. + m, err := replaceMap.Clone() + if err != nil { + return nil, err + } + + cl.maps[mapName] = m + return m, nil + } + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles) if err != nil { return nil, fmt.Errorf("map %s: %w", mapName, err) @@ -419,33 +505,41 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { return nil, fmt.Errorf("unknown program %s", progName) } + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. 
+ if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } + + if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF { + return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName) + } + progSpec = progSpec.Copy() - // Rewrite any reference to a valid map. + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. for i := range progSpec.Instructions { ins := &progSpec.Instructions[i] - if !ins.IsLoadFromMap() || ins.Reference == "" { + if !ins.IsLoadFromMap() || ins.Reference() == "" { continue } - if uint32(ins.Constant) != math.MaxUint32 { - // Don't overwrite maps already rewritten, users can - // rewrite programs in the spec themselves + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. + if int32(ins.Constant) > 0 { continue } - m, err := cl.loadMap(ins.Reference) + m, err := cl.loadMap(ins.Reference()) if err != nil { return nil, fmt.Errorf("program %s: %w", progName, err) } - fd := m.FD() - if fd < 0 { - return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) - } - if err := ins.RewriteMapPtr(m.FD()); err != nil { - return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err) + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) } } @@ -467,24 +561,30 @@ func (cl *collectionLoader) populateMaps() error { mapSpec = mapSpec.Copy() - // Replace any object stubs with loaded objects. + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. for i, kv := range mapSpec.Contents { - switch v := kv.Value.(type) { - case programStub: - // loadProgram is idempotent and could return an existing Program. - prog, err := cl.loadProgram(string(v)) - if err != nil { - return fmt.Errorf("loading program %s, for map %s: %w", v, mapName, err) + if objName, ok := kv.Value.(string); ok { + switch mapSpec.Type { + case ProgramArray: + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case ArrayOfMaps, HashOfMaps: + // loadMap is idempotent and could return an existing Map. + innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} } - mapSpec.Contents[i] = MapKV{kv.Key, prog} - - case mapStub: - // loadMap is idempotent and could return an existing Map. - innerMap, err := cl.loadMap(string(v)) - if err != nil { - return fmt.Errorf("loading inner map %s, for map %s: %w", v, mapName, err) - } - mapSpec.Contents[i] = MapKV{kv.Key, innerMap} } } @@ -497,7 +597,11 @@ func (cl *collectionLoader) populateMaps() error { return nil } -// LoadCollection parses an object file and converts it to a collection. +// LoadCollection reads an object file and creates and loads its declared +// resources into the kernel. 
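+//
+// A minimal usage sketch (the object file name is illustrative):
+//
+//	coll, err := LoadCollection("bpf_prog.o")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer coll.Close()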
+// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func LoadCollection(file string) (*Collection, error) { spec, err := LoadCollectionSpec(file) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go index f7f34da8f44c..396b3394d33d 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/doc.go @@ -13,4 +13,13 @@ // your application as any other resource. // // Use the link subpackage to attach a loaded program to a hook in the kernel. +// +// Note that losing all references to Map and Program resources will cause +// their underlying file descriptors to be closed, potentially removing those +// objects from the kernel. Always retain a reference by e.g. deferring a +// Close() of a Collection or LoadAndAssign object until application exit. +// +// Special care needs to be taken when handling maps of type ProgramArray, +// as the kernel erases its contents when the last userspace or bpffs +// reference disappears, regardless of the map being in active use. package ebpf diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go index 42010f43e583..df278895c631 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader.go @@ -13,8 +13,8 @@ import ( "strings" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" "github.com/cilium/ebpf/internal/unix" ) @@ -26,6 +26,7 @@ type elfCode struct { license string version uint32 btf *btf.Spec + extInfo *btf.ExtInfos } // LoadCollectionSpec parses an ELF file into a CollectionSpec. @@ -49,7 +50,6 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { if err != nil { return nil, err } - defer f.Close() var ( licenseSection *elf.Section @@ -95,77 +95,29 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load version: %w", err) } - btfSpec, err := btf.LoadSpecFromReader(rd) + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) if err != nil && !errors.Is(err, btf.ErrNotFound) { return nil, fmt.Errorf("load BTF: %w", err) } - // Assign symbols to all the sections we're interested in. - symbols, err := f.Symbols() - if err != nil { - return nil, fmt.Errorf("load symbols: %v", err) - } - - for _, symbol := range symbols { - idx := symbol.Section - symType := elf.ST_TYPE(symbol.Info) - - section := sections[idx] - if section == nil { - continue - } - - // Older versions of LLVM don't tag symbols correctly, so keep - // all NOTYPE ones. - keep := symType == elf.STT_NOTYPE - switch section.kind { - case mapSection, btfMapSection, dataSection: - keep = keep || symType == elf.STT_OBJECT - case programSection: - keep = keep || symType == elf.STT_FUNC - } - if !keep || symbol.Name == "" { - continue - } - - section.symbols[symbol.Value] = symbol - } - ec := &elfCode{ SafeELFFile: f, sections: sections, license: license, version: version, btf: btfSpec, + extInfo: btfExtInfo, } - // Go through relocation sections, and parse the ones for sections we're - // interested in. 
Make sure that relocations point at valid sections. - for idx, relSection := range relSections { - section := sections[idx] - if section == nil { - continue - } - - rels, err := ec.loadRelocations(relSection, symbols) - if err != nil { - return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err) - } - - for _, rel := range rels { - target := sections[rel.Section] - if target == nil { - return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) - } - - if target.Flags&elf.SHF_STRINGS > 0 { - return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) - } + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } - target.references++ - } + ec.assignSymbols(symbols) - section.relocations = rels + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) } // Collect all the various ways to define maps. @@ -183,12 +135,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { } // Finally, collect programs and link them. - progs, err := ec.loadPrograms() + progs, err := ec.loadProgramSections() if err != nil { return nil, fmt.Errorf("load programs: %w", err) } - return &CollectionSpec{maps, progs, ec.ByteOrder}, nil + return &CollectionSpec{maps, progs, btfSpec, ec.ByteOrder}, nil } func loadLicense(sec *elf.Section) (string, error) { @@ -247,12 +199,91 @@ func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { } } -func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { - var ( - progs []*ProgramSpec - libs []*ProgramSpec - ) +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. + if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + symSection.symbols[symbol.Value] = symbol + } +} + +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. 
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + if target.Flags&elf.SHF_STRINGS > 0 { + return fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + + // Generate a ProgramSpec for each function found in each program section. + var export []string for _, sec := range ec.sections { if sec.kind != programSection { continue @@ -262,86 +293,144 @@ func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { return nil, fmt.Errorf("section %v: missing symbols", sec.Name) } - funcSym, ok := sec.symbols[0] - if !ok { - return nil, fmt.Errorf("section %v: no label at start", sec.Name) - } - - insns, length, err := ec.loadInstructions(sec) + funcs, err := ec.loadFunctions(sec) if err != nil { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + return nil, fmt.Errorf("section %v: %w", sec.Name, err) } progType, attachType, progFlags, attachTo := getProgType(sec.Name) - spec := &ProgramSpec{ - Name: funcSym.Name, - Type: progType, - Flags: progFlags, - AttachType: attachType, - AttachTo: attachTo, - License: ec.license, - KernelVersion: ec.version, - Instructions: insns, - ByteOrder: ec.ByteOrder, - } + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + BTF: ec.btf, + } - if ec.btf != nil { - spec.BTF, err = ec.btf.Program(sec.Name, length) - if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + // Function names must be unique within a single ELF blob. + if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) } - } + progs[name] = spec - if spec.Type == UnspecifiedProgram { - // There is no single name we can use for "library" sections, - // since they may contain multiple functions. We'll decode the - // labels they contain later on, and then link sections that way. - libs = append(libs, spec) - } else { - progs = append(progs, spec) + if spec.SectionName != ".text" { + export = append(export, name) + } } } - res := make(map[string]*ProgramSpec, len(progs)) - for _, prog := range progs { - err := link(prog, libs) - if err != nil { - return nil, fmt.Errorf("program %s: %w", prog.Name, err) + flattenPrograms(progs, export) + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. 
These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. + for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) } - res[prog.Name] = prog } - return res, nil + return progs, nil } -func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) { - var ( - r = bufio.NewReader(section.Open()) - insns asm.Instructions - offset uint64 - ) - for { - var ins asm.Instruction - n, err := ins.Unmarshal(r, ec.ByteOrder) - if err == io.EOF { - return insns, offset, nil - } - if err != nil { - return nil, 0, fmt.Errorf("offset %d: %w", offset, err) - } +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { + r := bufio.NewReader(section.Open()) + + // Decode the section's instruction stream. + var insns asm.Instructions + if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } - ins.Symbol = section.symbols[offset].Name + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) + } + + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. if rel, ok := section.relocations[offset]; ok { - if err = ec.relocateInstruction(&ins, rel); err != nil { - return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) + } + } else { + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) } } + } - insns = append(insns, ins) - offset += n + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) } + + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. 
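// Usage-level sketch of what loadProgramSections/loadFunctions above produce,
// assuming a hypothetical compiled object file "bpf.o": after this refactor,
// CollectionSpec.Programs is keyed by function name rather than section name,
// and helpers living in .text are flattened into their callers and hidden.
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("bpf.o")
	if err != nil {
		panic(err)
	}
	for name, prog := range spec.Programs {
		fmt.Println(name, prog.SectionName, prog.Type)
	}
}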
+func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) + } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. + dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. + dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) } func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { @@ -367,18 +456,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Src = asm.PseudoMapFD - // Mark the instruction as needing an update when creating the - // collection. - if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - case dataSection: var offset uint32 switch typ { case elf.STT_SECTION: if bind != elf.STB_LOCAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind) } // This is really a reference to a static symbol, which clang doesn't @@ -387,8 +470,17 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err offset = uint32(uint64(ins.Constant)) case elf.STT_OBJECT: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. + if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind) } offset = uint32(rel.Value) @@ -406,51 +498,71 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Constant = int64(uint64(offset) << 32) ins.Src = asm.PseudoMapValue - // Mark the instruction as needing an update when creating the - // collection. 
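// Worked example for jumpTarget above, with the arithmetic spelled out: a
// relative call encoded at byte offset 16 with Constant 3 skips three raw
// instructions past the end of the current one.
package main

import "fmt"

// instructionSize mirrors asm.InstructionSize: one raw BPF instruction.
const instructionSize = 8

func main() {
	offset, constant := uint64(16), int64(3)
	dest := constant*instructionSize + int64(offset+instructionSize)
	fmt.Println(dest) // 48: 3*8 bytes past the end of the instruction at byte 16
}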
- if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - case programSection: - if ins.OpCode.JumpOp() != asm.Call { - return fmt.Errorf("not a call instruction: %s", ins) - } + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } - if ins.Src != asm.PseudoCall { - return fmt.Errorf("call: %s: incorrect source register", name) - } + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } - switch typ { - case elf.STT_NOTYPE, elf.STT_FUNC: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) - } + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } - case elf.STT_SECTION: - if bind != elf.STB_LOCAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. + offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: no symbol at offset %d", offset) + } + + name = sym.Name + ins.Constant = -1 + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) } + case opCode.IsDWordLoad(): + switch typ { + case elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + } - // The function we want to call is in the indicated section, - // at the offset encoded in the instruction itself. Reverse - // the calculation to find the real function we're looking for. - // A value of -1 references the first instruction in the section. - offset := int64(int32(ins.Constant)+1) * asm.InstructionSize - if offset < 0 { - return fmt.Errorf("call: %s: invalid offset %d", name, offset) + // ins.Constant already contains the offset in bytes from the + // start of the section. This is different than a call to a + // static function. 
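// Worked example for the call relocation above: the Constant of a bpf-to-bpf
// call counts raw instructions relative to the *next* instruction, so -1
// resolves to byte 0 (the first instruction of the target section) and 4
// resolves to byte (4+1)*8 = 40. A dword load, by contrast, already carries
// the byte offset in its Constant.
package main

import "fmt"

const instructionSize = 8 // mirrors asm.InstructionSize

func main() {
	for _, c := range []int32{-1, 4} {
		fmt.Println(int64(c+1) * instructionSize) // 0, then 40
	}
}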
+ + default: + return fmt.Errorf("load: %s: invalid symbol type %s", name, typ) } - sym, ok := target.symbols[uint64(offset)] + sym, ok := target.symbols[uint64(ins.Constant)] if !ok { - return fmt.Errorf("call: %s: no symbol at offset %d", name, offset) + return fmt.Errorf("load: no symbol at offset %d", ins.Constant) } - ins.Constant = -1 name = sym.Name + ins.Constant = -1 + ins.Src = asm.PseudoFunc default: - return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + return fmt.Errorf("neither a call nor a load instruction: %v", ins) } case undefSection: @@ -468,7 +580,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) } - ins.Reference = name + *ins = ins.WithReference(name) return nil } @@ -525,7 +637,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return fmt.Errorf("map %s: reading map tail: %w", mapName, err) } if len(extra) > 0 { - spec.Extra = *bytes.NewReader(extra) + spec.Extra = bytes.NewReader(extra) } if err := spec.clampPerfEventArraySize(); err != nil { @@ -554,7 +666,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { // Each section must appear as a DataSec in the ELF's BTF blob. var ds *btf.Datasec - if err := ec.btf.FindType(sec.Name, &ds); err != nil { + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) } @@ -617,14 +729,6 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return nil } -// A programStub is a placeholder for a Program to be inserted at a certain map key. -// It needs to be resolved into a Program later on in the loader process. -type programStub string - -// A mapStub is a placeholder for a Map to be inserted at a certain map key. -// It needs to be resolved into a Map later on in the loader process. -type mapStub string - // mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing // a BTF map definition. The name and spec arguments will be copied to the // resulting MapSpec, and inner must be true on any resursive invocations. @@ -811,7 +915,9 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b ValueSize: valueSize, MaxEntries: maxEntries, Flags: flags, - BTF: &btf.Map{Spec: spec, Key: key, Value: value}, + Key: key, + Value: value, + BTF: spec, Pinning: pinType, InnerMap: innerMapSpec, Contents: contents, @@ -863,7 +969,7 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem // The offset of the 'values' member within the _struct_ (in bits) // is the starting point of the array. Convert to bytes. Add VarSecinfo // offset to get the absolute position in the ELF blob. - start := (member.OffsetBits / 8) + vs.Offset + start := member.Offset.Bytes() + vs.Offset // 'values' is encoded in BTF as a zero (variable) length struct // member, and its contents run until the end of the VarSecinfo. // Add VarSecinfo offset to get the absolute position in the ELF blob. @@ -898,9 +1004,9 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem // skipped here. 
switch t := elf.ST_TYPE(r.Info); t { case elf.STT_FUNC: - contents = append(contents, MapKV{uint32(k), programStub(r.Name)}) + contents = append(contents, MapKV{uint32(k), r.Name}) case elf.STT_OBJECT: - contents = append(contents, MapKV{uint32(k), mapStub(r.Name)}) + contents = append(contents, MapKV{uint32(k), r.Name}) default: return nil, fmt.Errorf("unknown relocation type %v", t) } @@ -921,15 +1027,6 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { continue } - if ec.btf == nil { - return errors.New("data sections require BTF, make sure all consts are marked as static") - } - - var datasec *btf.Datasec - if err := ec.btf.FindType(sec.Name, &datasec); err != nil { - return fmt.Errorf("data section %s: can't get BTF: %w", sec.Name, err) - } - data, err := sec.Data() if err != nil { return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) @@ -946,14 +1043,25 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { ValueSize: uint32(len(data)), MaxEntries: 1, Contents: []MapKV{{uint32(0), data}}, - BTF: &btf.Map{Spec: ec.btf, Key: &btf.Void{}, Value: datasec}, } - switch sec.Name { - case ".rodata": + // It is possible for a data section to exist without a corresponding BTF Datasec + // if it only contains anonymous values like macro-defined arrays. + if ec.btf != nil { + var ds *btf.Datasec + if ec.btf.TypeByName(sec.Name, &ds) == nil { + // Assign the spec's key and BTF only if the Datasec lookup was successful. + mapSpec.BTF = ec.btf + mapSpec.Key = &btf.Void{} + mapSpec.Value = ds + } + } + + switch n := sec.Name; { + case strings.HasPrefix(n, ".rodata"): mapSpec.Flags = unix.BPF_F_RDONLY_PROG mapSpec.Freeze = true - case ".bss": + case n == ".bss": // The kernel already zero-initializes the map mapSpec.Contents = nil } @@ -964,91 +1072,103 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { } func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { - types := map[string]struct { + types := []struct { + prefix string progType ProgramType attachType AttachType progFlags uint32 }{ - // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c - "socket": {SocketFilter, AttachNone, 0}, - "sk_reuseport/migrate": {SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, - "sk_reuseport": {SkReuseport, AttachSkReuseportSelect, 0}, - "seccomp": {SocketFilter, AttachNone, 0}, - "kprobe/": {Kprobe, AttachNone, 0}, - "uprobe/": {Kprobe, AttachNone, 0}, - "kretprobe/": {Kprobe, AttachNone, 0}, - "uretprobe/": {Kprobe, AttachNone, 0}, - "tracepoint/": {TracePoint, AttachNone, 0}, - "raw_tracepoint/": {RawTracepoint, AttachNone, 0}, - "raw_tp/": {RawTracepoint, AttachNone, 0}, - "tp_btf/": {Tracing, AttachTraceRawTp, 0}, - "xdp": {XDP, AttachNone, 0}, - "perf_event": {PerfEvent, AttachNone, 0}, - "lwt_in": {LWTIn, AttachNone, 0}, - "lwt_out": {LWTOut, AttachNone, 0}, - "lwt_xmit": {LWTXmit, AttachNone, 0}, - "lwt_seg6local": {LWTSeg6Local, AttachNone, 0}, - "sockops": {SockOps, AttachCGroupSockOps, 0}, - "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_msg": {SkMsg, AttachSkSKBStreamVerdict, 0}, - "lirc_mode2": {LircMode2, AttachLircMode2, 0}, - "flow_dissector": {FlowDissector, AttachFlowDissector, 0}, - "iter/": {Tracing, AttachTraceIter, 0}, - "fentry/": {Tracing, AttachTraceFEntry, 0}, - "fmod_ret/": {Tracing, AttachModifyReturn, 0}, - "fexit/": {Tracing, AttachTraceFExit, 
0}, - "fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, - "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, - "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, - "sk_lookup/": {SkLookup, AttachSkLookup, 0}, - "freplace/": {Extension, AttachNone, 0}, - "lsm/": {LSM, AttachLSMMac, 0}, - "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, - - "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0}, - "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0}, - "cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0}, - "cgroup/skb": {CGroupSKB, AttachNone, 0}, - "cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate, 0}, - "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0}, - "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0}, - "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0}, - "cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0}, - "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0}, - "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0}, - "cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, - "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, - "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, - "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, - "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0}, - "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0}, - "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, - "classifier": {SchedCLS, AttachNone, 0}, - "action": {SchedACT, AttachNone, 0}, - - "cgroup/getsockname4": {CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, - "cgroup/getsockname6": {CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, - "cgroup/getpeername4": {CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, - "cgroup/getpeername6": {CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, + // Please update the types from libbpf.c and follow the order of it. 
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c + {"socket", SocketFilter, AttachNone, 0}, + {"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, + {"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0}, + {"kprobe/", Kprobe, AttachNone, 0}, + {"uprobe/", Kprobe, AttachNone, 0}, + {"kretprobe/", Kprobe, AttachNone, 0}, + {"uretprobe/", Kprobe, AttachNone, 0}, + {"tc", SchedCLS, AttachNone, 0}, + {"classifier", SchedCLS, AttachNone, 0}, + {"action", SchedACT, AttachNone, 0}, + {"tracepoint/", TracePoint, AttachNone, 0}, + {"tp/", TracePoint, AttachNone, 0}, + {"raw_tracepoint/", RawTracepoint, AttachNone, 0}, + {"raw_tp/", RawTracepoint, AttachNone, 0}, + {"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0}, + {"raw_tp.w/", RawTracepointWritable, AttachNone, 0}, + {"tp_btf/", Tracing, AttachTraceRawTp, 0}, + {"fentry/", Tracing, AttachTraceFEntry, 0}, + {"fmod_ret/", Tracing, AttachModifyReturn, 0}, + {"fexit/", Tracing, AttachTraceFExit, 0}, + {"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, + {"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, + {"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, + {"freplace/", Extension, AttachNone, 0}, + {"lsm/", LSM, AttachLSMMac, 0}, + {"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, + {"iter/", Tracing, AttachTraceIter, 0}, + {"syscall", Syscall, AttachNone, 0}, + {"xdp_devmap/", XDP, AttachXDPDevMap, 0}, + {"xdp_cpumap/", XDP, AttachXDPCPUMap, 0}, + {"xdp", XDP, AttachNone, 0}, + {"perf_event", PerfEvent, AttachNone, 0}, + {"lwt_in", LWTIn, AttachNone, 0}, + {"lwt_out", LWTOut, AttachNone, 0}, + {"lwt_xmit", LWTXmit, AttachNone, 0}, + {"lwt_seg6local", LWTSeg6Local, AttachNone, 0}, + {"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0}, + {"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0}, + {"cgroup/skb", CGroupSKB, AttachNone, 0}, + {"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0}, + {"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0}, + {"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0}, + {"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0}, + {"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0}, + {"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0}, + {"sockops", SockOps, AttachCGroupSockOps, 0}, + {"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0}, + {"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0}, + {"sk_skb", SkSKB, AttachNone, 0}, + {"sk_msg", SkMsg, AttachSkMsgVerdict, 0}, + {"lirc_mode2", LircMode2, AttachLircMode2, 0}, + {"flow_dissector", FlowDissector, AttachFlowDissector, 0}, + {"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0}, + {"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0}, + {"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0}, + {"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0}, + {"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, + {"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, + {"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, + {"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, + {"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, + {"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, + {"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, + {"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, + 
{"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0}, + {"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0}, + {"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0}, + {"struct_ops+", StructOps, AttachNone, 0}, + {"sk_lookup/", SkLookup, AttachSkLookup, 0}, + + {"seccomp", SocketFilter, AttachNone, 0}, } - for prefix, t := range types { - if !strings.HasPrefix(sectionName, prefix) { + for _, t := range types { + if !strings.HasPrefix(sectionName, t.prefix) { continue } - if !strings.HasSuffix(prefix, "/") { + if !strings.HasSuffix(t.prefix, "/") { return t.progType, t.attachType, t.progFlags, "" } - return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):] + return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):] } return UnspecifiedProgram, AttachNone, 0, "" } -func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { +func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { rels := make(map[uint64]elf.Symbol) if sec.Entsize < 16 { diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go deleted file mode 100644 index 5f4e0a0ad023..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package ebpf - -import "bytes" - -func FuzzLoadCollectionSpec(data []byte) int { - spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data)) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/info.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/info.go index 65fa4d7d8502..ae77bc6197f2 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/info.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/info.go @@ -2,6 +2,7 @@ package ebpf import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -10,9 +11,13 @@ import ( "strings" "syscall" "time" + "unsafe" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" ) // MapInfo describes a map. @@ -23,12 +28,13 @@ type MapInfo struct { ValueSize uint32 MaxEntries uint32 Flags uint32 - // Name as supplied by user space at load time. + // Name as supplied by user space at load time. Available from 4.15. Name string } -func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { - info, err := bpfGetMapInfoByFD(fd) +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err := sys.ObjInfo(fd, &info) if errors.Is(err, syscall.EINVAL) { return newMapInfoFromProc(fd) } @@ -37,18 +43,17 @@ func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { } return &MapInfo{ - MapType(info.map_type), - MapID(info.id), - info.key_size, - info.value_size, - info.max_entries, - info.map_flags, - // name is available from 4.15. 
- internal.CString(info.name[:]), + MapType(info.Type), + MapID(info.Id), + info.KeySize, + info.ValueSize, + info.MaxEntries, + info.MapFlags, + unix.ByteSliceToString(info.Name[:]), }, nil } -func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) { +func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { var mi MapInfo err := scanFdInfo(fd, map[string]interface{}{ "map_type": &mi.Type, @@ -84,20 +89,21 @@ type programStats struct { type ProgramInfo struct { Type ProgramType id ProgramID - // Truncated hash of the BPF bytecode. + // Truncated hash of the BPF bytecode. Available from 4.13. Tag string - // Name as supplied by user space at load time. + // Name as supplied by user space at load time. Available from 4.15. Name string - // BTF for the program. - btf btf.ID - // IDS map ids related to program. - ids []MapID + btf btf.ID stats *programStats + + maps []MapID + insns []byte } -func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { - info, err := bpfGetProgInfoByFD(fd, nil) +func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + err := sys.ObjInfo(fd, &info) if errors.Is(err, syscall.EINVAL) { return newProgramInfoFromProc(fd) } @@ -105,32 +111,43 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { return nil, err } - var mapIDs []MapID - if info.nr_map_ids > 0 { - mapIDs = make([]MapID, info.nr_map_ids) - info, err = bpfGetProgInfoByFD(fd, mapIDs) - if err != nil { + pi := ProgramInfo{ + Type: ProgramType(info.Type), + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + stats: &programStats{ + runtime: time.Duration(info.RunTimeNs), + runCount: info.RunCnt, + }, + } + + // Start with a clean struct for the second call, otherwise we may get EFAULT. + var info2 sys.ProgInfo + + if info.NrMapIds > 0 { + pi.maps = make([]MapID, info.NrMapIds) + info2.NrMapIds = info.NrMapIds + info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + } + + if info.NrMapIds > 0 || info.XlatedProgLen > 0 { + if err := sys.ObjInfo(fd, &info2); err != nil { return nil, err } } - return &ProgramInfo{ - Type: ProgramType(info.prog_type), - id: ProgramID(info.id), - // tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD. - Tag: hex.EncodeToString(info.tag[:]), - // name is available from 4.15. - Name: internal.CString(info.name[:]), - btf: btf.ID(info.btf_id), - ids: mapIDs, - stats: &programStats{ - runtime: time.Duration(info.run_time_ns), - runCount: info.run_cnt, - }, - }, nil + return &pi, nil } -func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) { +func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { var info ProgramInfo err := scanFdInfo(fd, map[string]interface{}{ "prog_type": &info.Type, @@ -160,6 +177,7 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) { // BTFID returns the BTF ID associated with the program. // +// The ID is only valid as long as the associated program is kept alive. // Available from 5.0. // // The bool return value indicates whether this optional field is available and @@ -191,20 +209,50 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) { return time.Duration(0), false } +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. 
These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. +// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. +// +// The first instruction is marked as a symbol using the Program's name. +// +// Available from 4.13. Requires CAP_BPF or equivalent. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + var insns asm.Instructions + if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) + } + + // Tag the first instruction with the name of the program, if available. + insns[0] = insns[0].WithSymbol(pi.Name) + + return insns, nil +} + // MapIDs returns the maps related to the program. // +// Available from 4.15. +// // The bool return value indicates whether this optional field is available. func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { - return pi.ids, pi.ids != nil + return pi.maps, pi.maps != nil } -func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { - raw, err := fd.Value() - if err != nil { - return err - } - - fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw)) +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) if err != nil { return err } @@ -247,6 +295,10 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { return err } + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + if scanned != len(fields) { return errMissingFields } @@ -261,11 +313,9 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { // // Requires at least 5.8. func EnableStats(which uint32) (io.Closer, error) { - attr := internal.BPFEnableStatsAttr{ - StatsType: which, - } - - fd, err := internal.BPFEnableStats(&attr) + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf.go deleted file mode 100644 index 2b5f6d226a4f..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/btf.go +++ /dev/null @@ -1,798 +0,0 @@ -package btf - -import ( - "bytes" - "debug/elf" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - "reflect" - "sync" - "unsafe" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/unix" -) - -const btfMagic = 0xeB9F - -// Errors returned by BTF functions. -var ( - ErrNotSupported = internal.ErrNotSupported - ErrNotFound = errors.New("not found") - ErrNoExtendedInfo = errors.New("no extended info") -) - -// ID represents the unique ID of a BTF object. -type ID uint32 - -// Spec represents decoded BTF. 
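// Usage sketch for ProgramInfo.Instructions above, assuming a loaded
// *ebpf.Program and a 4.13+ kernel with sufficient privileges: dump the
// kernel-rewritten instruction stream for troubleshooting.
package inspect

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func dumpXlated(prog *ebpf.Program) error {
	info, err := prog.Info()
	if err != nil {
		return err
	}
	insns, err := info.Instructions()
	if err != nil {
		return err // wraps ErrNotSupported on old kernels or without CAP_BPF
	}
	for _, ins := range insns {
		fmt.Println(ins)
	}
	return nil
}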
-type Spec struct { - rawTypes []rawType - strings stringTable - types []Type - namedTypes map[string][]NamedType - funcInfos map[string]extInfo - lineInfos map[string]extInfo - coreRelos map[string]coreRelos - byteOrder binary.ByteOrder -} - -type btfHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - TypeOff uint32 - TypeLen uint32 - StringOff uint32 - StringLen uint32 -} - -// LoadSpecFromReader reads BTF sections from an ELF. -// -// Returns ErrNotFound if the reader contains no BTF. -func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) - if err != nil { - return nil, err - } - defer file.Close() - - symbols, err := file.Symbols() - if err != nil { - return nil, fmt.Errorf("can't read symbols: %v", err) - } - - variableOffsets := make(map[variable]uint32) - for _, symbol := range symbols { - if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { - // Ignore things like SHN_ABS - continue - } - - if int(symbol.Section) >= len(file.Sections) { - return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) - } - - secName := file.Sections[symbol.Section].Name - if symbol.Value > math.MaxUint32 { - return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) - } - - variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) - } - - return loadSpecFromELF(file, variableOffsets) -} - -func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]uint32) (*Spec, error) { - var ( - btfSection *elf.Section - btfExtSection *elf.Section - sectionSizes = make(map[string]uint32) - ) - - for _, sec := range file.Sections { - switch sec.Name { - case ".BTF": - btfSection = sec - case ".BTF.ext": - btfExtSection = sec - default: - if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { - break - } - - if sec.Size > math.MaxUint32 { - return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) - } - - sectionSizes[sec.Name] = uint32(sec.Size) - } - } - - if btfSection == nil { - return nil, fmt.Errorf("btf: %w", ErrNotFound) - } - - spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - if btfExtSection == nil { - return spec, nil - } - - spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) - if err != nil { - return nil, fmt.Errorf("can't read ext info: %w", err) - } - - return spec, nil -} - -// LoadRawSpec reads a blob of BTF data that isn't wrapped in an ELF file. -// -// Prefer using LoadSpecFromReader, since this function only supports a subset -// of BTF. -func LoadRawSpec(btf io.Reader, bo binary.ByteOrder) (*Spec, error) { - // This will return an error if we encounter a Datasec, since we can't fix - // it up. 
- return loadRawSpec(btf, bo, nil, nil) -} - -func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { - rawTypes, rawStrings, err := parseBTF(btf, bo) - if err != nil { - return nil, err - } - - err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - types, typesByName, err := inflateRawTypes(rawTypes, rawStrings) - if err != nil { - return nil, err - } - - return &Spec{ - rawTypes: rawTypes, - namedTypes: typesByName, - types: types, - strings: rawStrings, - byteOrder: bo, - }, nil -} - -var kernelBTF struct { - sync.Mutex - *Spec -} - -// LoadKernelSpec returns the current kernel's BTF information. -// -// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns -// ErrNotSupported if BTF is not enabled. -func LoadKernelSpec() (*Spec, error) { - kernelBTF.Lock() - defer kernelBTF.Unlock() - - if kernelBTF.Spec != nil { - return kernelBTF.Spec, nil - } - - var err error - kernelBTF.Spec, err = loadKernelSpec() - return kernelBTF.Spec, err -} - -func loadKernelSpec() (*Spec, error) { - release, err := unix.KernelRelease() - if err != nil { - return nil, fmt.Errorf("can't read kernel release number: %w", err) - } - - fh, err := os.Open("/sys/kernel/btf/vmlinux") - if err == nil { - defer fh.Close() - - return LoadRawSpec(fh, internal.NativeEndian) - } - - // use same list of locations as libbpf - // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 - locations := []string{ - "/boot/vmlinux-%s", - "/lib/modules/%s/vmlinux-%[1]s", - "/lib/modules/%s/build/vmlinux", - "/usr/lib/modules/%s/kernel/vmlinux", - "/usr/lib/debug/boot/vmlinux-%s", - "/usr/lib/debug/boot/vmlinux-%s.debug", - "/usr/lib/debug/lib/modules/%s/vmlinux", - } - - for _, loc := range locations { - path := fmt.Sprintf(loc, release) - - fh, err := os.Open(path) - if err != nil { - continue - } - defer fh.Close() - - file, err := internal.NewSafeELFFile(fh) - if err != nil { - return nil, err - } - defer file.Close() - - return loadSpecFromELF(file, nil) - } - - return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) -} - -func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) { - rawBTF, err := io.ReadAll(btf) - if err != nil { - return nil, nil, fmt.Errorf("can't read BTF: %v", err) - } - - rd := bytes.NewReader(rawBTF) - - var header btfHeader - if err := binary.Read(rd, bo, &header); err != nil { - return nil, nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, errors.New("header is too short") - } - - if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil { - return nil, nil, fmt.Errorf("header padding: %v", err) - } - - if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err) - } - - rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen))) - if err != nil { - return nil, nil, 
fmt.Errorf("can't read type names: %w", err) - } - - if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err) - } - - rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo) - if err != nil { - return nil, nil, fmt.Errorf("can't read types: %w", err) - } - - return rawTypes, rawStrings, nil -} - -type variable struct { - section string - name string -} - -func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { - for i, rawType := range rawTypes { - if rawType.Kind() != kindDatasec { - continue - } - - name, err := rawStrings.Lookup(rawType.NameOff) - if err != nil { - return err - } - - if name == ".kconfig" || name == ".ksyms" { - return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) - } - - if rawTypes[i].SizeType != 0 { - continue - } - - size, ok := sectionSizes[name] - if !ok { - return fmt.Errorf("data section %s: missing size", name) - } - - rawTypes[i].SizeType = size - - secinfos := rawType.data.([]btfVarSecinfo) - for j, secInfo := range secinfos { - id := int(secInfo.Type - 1) - if id >= len(rawTypes) { - return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) - } - - varName, err := rawStrings.Lookup(rawTypes[id].NameOff) - if err != nil { - return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) - } - - offset, ok := variableOffsets[variable{name, varName}] - if !ok { - return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) - } - - secinfos[j].Offset = offset - } - } - - return nil -} - -// Copy creates a copy of Spec. -func (s *Spec) Copy() *Spec { - types, _ := copyTypes(s.types, nil) - namedTypes := make(map[string][]NamedType) - for _, typ := range types { - if named, ok := typ.(NamedType); ok { - name := essentialName(named.TypeName()) - namedTypes[name] = append(namedTypes[name], named) - } - } - - // NB: Other parts of spec are not copied since they are immutable. - return &Spec{ - s.rawTypes, - s.strings, - types, - namedTypes, - s.funcInfos, - s.lineInfos, - s.coreRelos, - s.byteOrder, - } -} - -type marshalOpts struct { - ByteOrder binary.ByteOrder - StripFuncLinkage bool -} - -func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { - var ( - buf bytes.Buffer - header = new(btfHeader) - headerLen = binary.Size(header) - ) - - // Reserve space for the header. We have to write it last since - // we don't know the size of the type section yet. - _, _ = buf.Write(make([]byte, headerLen)) - - // Write type section, just after the header. - for _, raw := range s.rawTypes { - switch { - case opts.StripFuncLinkage && raw.Kind() == kindFunc: - raw.SetLinkage(StaticFunc) - } - - if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - } - - typeLen := uint32(buf.Len() - headerLen) - - // Write string section after type section. - _, _ = buf.Write(s.strings) - - // Fill out the header, and write it out. 
- header = &btfHeader{ - Magic: btfMagic, - Version: 1, - Flags: 0, - HdrLen: uint32(headerLen), - TypeOff: 0, - TypeLen: typeLen, - StringOff: typeLen, - StringLen: uint32(len(s.strings)), - } - - raw := buf.Bytes() - err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) - if err != nil { - return nil, fmt.Errorf("can't write header: %v", err) - } - - return raw, nil -} - -type sliceWriter []byte - -func (sw sliceWriter) Write(p []byte) (int, error) { - if len(p) != len(sw) { - return 0, errors.New("size doesn't match") - } - - return copy(sw, p), nil -} - -// Program finds the BTF for a specific section. -// -// Length is the number of bytes in the raw BPF instruction stream. -// -// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't -// contain extended BTF info. -func (s *Spec) Program(name string, length uint64) (*Program, error) { - if length == 0 { - return nil, errors.New("length musn't be zero") - } - - if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil { - return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo) - } - - funcInfos, funcOK := s.funcInfos[name] - lineInfos, lineOK := s.lineInfos[name] - relos, coreOK := s.coreRelos[name] - - if !funcOK && !lineOK && !coreOK { - return nil, fmt.Errorf("no extended BTF info for section %s", name) - } - - return &Program{s, length, funcInfos, lineInfos, relos}, nil -} - -// FindType searches for a type with a specific name. -// -// Called T a type that satisfies Type, typ must be a non-nil **T. -// On success, the address of the found type will be copied in typ. -// -// Returns an error wrapping ErrNotFound if no matching -// type exists in spec. -func (s *Spec) FindType(name string, typ interface{}) error { - typValue := reflect.ValueOf(typ) - if typValue.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", typ) - } - - typPtr := typValue.Elem() - if !typPtr.CanSet() { - return fmt.Errorf("%T cannot be set", typ) - } - - wanted := typPtr.Type() - if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) { - return fmt.Errorf("%T does not satisfy Type interface", typ) - } - - var candidate Type - for _, typ := range s.namedTypes[essentialName(name)] { - if reflect.TypeOf(typ) != wanted { - continue - } - - // Match against the full name, not just the essential one. - if typ.TypeName() != name { - continue - } - - if candidate != nil { - return fmt.Errorf("type %s: multiple candidates for %T", name, typ) - } - - candidate = typ - } - - if candidate == nil { - return fmt.Errorf("type %s: %w", name, ErrNotFound) - } - - typPtr.Set(reflect.ValueOf(candidate)) - - return nil -} - -// Handle is a reference to BTF loaded into the kernel. -type Handle struct { - spec *Spec - fd *internal.FD -} - -// NewHandle loads BTF into the kernel. -// -// Returns ErrNotSupported if BTF is not supported. 
-func NewHandle(spec *Spec) (*Handle, error) { - if err := haveBTF(); err != nil { - return nil, err - } - - if spec.byteOrder != internal.NativeEndian { - return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) - } - - btf, err := spec.marshal(marshalOpts{ - ByteOrder: internal.NativeEndian, - StripFuncLinkage: haveFuncLinkage() != nil, - }) - if err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - - if uint64(len(btf)) > math.MaxUint32 { - return nil, errors.New("BTF exceeds the maximum size") - } - - attr := &bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - } - - fd, err := bpfLoadBTF(attr) - if err != nil { - logBuf := make([]byte, 64*1024) - attr.logBuf = internal.NewSlicePointer(logBuf) - attr.btfLogSize = uint32(len(logBuf)) - attr.btfLogLevel = 1 - _, logErr := bpfLoadBTF(attr) - return nil, internal.ErrorWithLog(err, logBuf, logErr) - } - - return &Handle{spec.Copy(), fd}, nil -} - -// NewHandleFromID returns the BTF handle for a given id. -// -// Returns ErrNotExist, if there is no BTF with the given id. -// -// Requires CAP_SYS_ADMIN. -func NewHandleFromID(id ID) (*Handle, error) { - fd, err := internal.BPFObjGetFDByID(internal.BPF_BTF_GET_FD_BY_ID, uint32(id)) - if err != nil { - return nil, fmt.Errorf("get BTF by id: %w", err) - } - - info, err := newInfoFromFd(fd) - if err != nil { - _ = fd.Close() - return nil, fmt.Errorf("get BTF spec for handle: %w", err) - } - - return &Handle{info.BTF, fd}, nil -} - -// Spec returns the Spec that defined the BTF loaded into the kernel. -func (h *Handle) Spec() *Spec { - return h.spec -} - -// Close destroys the handle. -// -// Subsequent calls to FD will return an invalid value. -func (h *Handle) Close() error { - return h.fd.Close() -} - -// FD returns the file descriptor for the handle. -func (h *Handle) FD() int { - value, err := h.fd.Value() - if err != nil { - return -1 - } - - return int(value) -} - -// Map is the BTF for a map. -type Map struct { - Spec *Spec - Key, Value Type -} - -// Program is the BTF information for a stream of instructions. -type Program struct { - spec *Spec - length uint64 - funcInfos, lineInfos extInfo - coreRelos coreRelos -} - -// Spec returns the BTF spec of this program. -func (p *Program) Spec() *Spec { - return p.spec -} - -// Append the information from other to the Program. -func (p *Program) Append(other *Program) error { - if other.spec != p.spec { - return fmt.Errorf("can't append program with different BTF specs") - } - - funcInfos, err := p.funcInfos.append(other.funcInfos, p.length) - if err != nil { - return fmt.Errorf("func infos: %w", err) - } - - lineInfos, err := p.lineInfos.append(other.lineInfos, p.length) - if err != nil { - return fmt.Errorf("line infos: %w", err) - } - - p.funcInfos = funcInfos - p.lineInfos = lineInfos - p.coreRelos = p.coreRelos.append(other.coreRelos, p.length) - p.length += other.length - return nil -} - -// FuncInfos returns the binary form of BTF function infos. -func (p *Program) FuncInfos() (recordSize uint32, bytes []byte, err error) { - bytes, err = p.funcInfos.MarshalBinary() - if err != nil { - return 0, nil, fmt.Errorf("func infos: %w", err) - } - - return p.funcInfos.recordSize, bytes, nil -} - -// LineInfos returns the binary form of BTF line infos. 
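// Lifecycle sketch for Handle above (hedged: the public replacement in
// github.com/cilium/ebpf/btf keeps roughly the same shape at this version):
// loading a Spec pins it in the kernel behind a file descriptor that the
// caller must close.
package example

import "github.com/cilium/ebpf/btf"

func loadIntoKernel(spec *btf.Spec) error {
	handle, err := btf.NewHandle(spec)
	if err != nil {
		return err // wraps ErrNotSupported on kernels without BTF
	}
	defer handle.Close()
	_ = handle.FD() // e.g. referenced when loading programs or maps
	return nil
}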
-func (p *Program) LineInfos() (recordSize uint32, bytes []byte, err error) { - bytes, err = p.lineInfos.MarshalBinary() - if err != nil { - return 0, nil, fmt.Errorf("line infos: %w", err) - } - - return p.lineInfos.recordSize, bytes, nil -} - -// Fixups returns the changes required to adjust the program to the target. -// -// Passing a nil target will relocate against the running kernel. -func (p *Program) Fixups(target *Spec) (COREFixups, error) { - if len(p.coreRelos) == 0 { - return nil, nil - } - - if target == nil { - var err error - target, err = LoadKernelSpec() - if err != nil { - return nil, err - } - } - - return coreRelocate(p.spec, target, p.coreRelos) -} - -type bpfLoadBTFAttr struct { - btf internal.Pointer - logBuf internal.Pointer - btfSize uint32 - btfLogSize uint32 - btfLogLevel uint32 -} - -func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) { - fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil -} - -func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { - const minHeaderLength = 24 - - typesLen := uint32(binary.Size(types)) - header := btfHeader{ - Magic: btfMagic, - Version: 1, - HdrLen: minHeaderLength, - TypeOff: 0, - TypeLen: typesLen, - StringOff: typesLen, - StringLen: uint32(len(strings)), - } - - buf := new(bytes.Buffer) - _ = binary.Write(buf, bo, &header) - _ = binary.Write(buf, bo, types) - buf.Write(strings) - - return buf.Bytes() -} - -var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { - var ( - types struct { - Integer btfType - Var btfType - btfVar struct{ Linkage uint32 } - } - strings = []byte{0, 'a', 0} - ) - - // We use a BTF_KIND_VAR here, to make sure that - // the kernel understands BTF at least as well as we - // do. BTF_KIND_VAR was introduced ~5.1. - types.Integer.SetKind(kindPointer) - types.Var.NameOff = 1 - types.Var.SetKind(kindVar) - types.Var.SizeType = 1 - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { - // Treat both EINVAL and EPERM as not supported: loading the program - // might still succeed without BTF. 
- return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) - -var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { - if err := haveBTF(); err != nil { - return err - } - - var ( - types struct { - FuncProto btfType - Func btfType - } - strings = []byte{0, 'a', 0} - ) - - types.FuncProto.SetKind(kindFuncProto) - types.Func.SetKind(kindFunc) - types.Func.SizeType = 1 // aka FuncProto - types.Func.NameOff = 1 - types.Func.SetLinkage(GlobalFunc) - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go deleted file mode 100644 index cdae2ec40827..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go +++ /dev/null @@ -1,312 +0,0 @@ -package btf - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" -) - -type btfExtHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - FuncInfoOff uint32 - FuncInfoLen uint32 - LineInfoOff uint32 - LineInfoLen uint32 -} - -type btfExtCoreHeader struct { - CoreReloOff uint32 - CoreReloLen uint32 -} - -func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) { - var header btfExtHeader - var coreHeader btfExtCoreHeader - if err := binary.Read(r, bo, &header); err != nil { - return nil, nil, nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, nil, errors.New("header is too short") - } - - coreHdrSize := int64(binary.Size(&coreHeader)) - if remainder >= coreHdrSize { - if err := binary.Read(r, bo, &coreHeader); err != nil { - return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err) - } - remainder -= coreHdrSize - } - - // Of course, the .BTF.ext header has different semantics than the - // .BTF ext header. We need to ignore non-null values. 
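// The probe idiom used by haveBTF/haveFuncLinkage above, reduced to its
// core: try a minimal syscall payload and fold EINVAL/EPERM into a sentinel
// "not supported" error instead of failing hard, so callers can degrade
// gracefully.
package main

import (
	"errors"
	"fmt"
	"syscall"
)

var errNotSupported = errors.New("not supported")

func probe(attempt func() error) error {
	err := attempt()
	if errors.Is(err, syscall.EINVAL) || errors.Is(err, syscall.EPERM) {
		return errNotSupported
	}
	return err
}

func main() {
	fmt.Println(probe(func() error { return syscall.EINVAL })) // not supported
}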
- _, err = io.CopyN(io.Discard, r, remainder) - if err != nil { - return nil, nil, nil, fmt.Errorf("header padding: %v", err) - } - - if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err) - } - - buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen))) - funcInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("function info: %w", err) - } - - if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err) - } - - buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen))) - lineInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("line info: %w", err) - } - - if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { - if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err) - } - - relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err) - } - } - - return funcInfo, lineInfo, relos, nil -} - -type btfExtInfoSec struct { - SecNameOff uint32 - NumInfo uint32 -} - -type extInfoRecord struct { - InsnOff uint64 - Opaque []byte -} - -type extInfo struct { - byteOrder binary.ByteOrder - recordSize uint32 - records []extInfoRecord -} - -func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { - if other.byteOrder != ei.byteOrder { - return extInfo{}, fmt.Errorf("ext_info byte order mismatch, want %v (got %v)", ei.byteOrder, other.byteOrder) - } - - if other.recordSize != ei.recordSize { - return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) - } - - records := make([]extInfoRecord, 0, len(ei.records)+len(other.records)) - records = append(records, ei.records...) - for _, info := range other.records { - records = append(records, extInfoRecord{ - InsnOff: info.InsnOff + offset, - Opaque: info.Opaque, - }) - } - return extInfo{ei.byteOrder, ei.recordSize, records}, nil -} - -func (ei extInfo) MarshalBinary() ([]byte, error) { - if ei.byteOrder != internal.NativeEndian { - return nil, fmt.Errorf("%s is not the native byte order", ei.byteOrder) - } - - if len(ei.records) == 0 { - return nil, nil - } - - buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records))) - for _, info := range ei.records { - // The kernel expects offsets in number of raw bpf instructions, - // while the ELF tracks it in bytes. 
- insnOff := uint32(info.InsnOff / asm.InstructionSize) - if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil { - return nil, fmt.Errorf("can't write instruction offset: %v", err) - } - - buf.Write(info.Opaque) - } - - return buf.Bytes(), nil -} - -func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) { - const maxRecordSize = 256 - - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("can't read record size: %v", err) - } - - if recordSize < 4 { - // Need at least insnOff - return nil, errors.New("record size too short") - } - if recordSize > maxRecordSize { - return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) - } - - result := make(map[string]extInfo) - for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - - var records []extInfoRecord - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var byteOff uint32 - if err := binary.Read(r, bo, &byteOff); err != nil { - return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err) - } - - buf := make([]byte, int(recordSize-4)) - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("section %v: can't read record: %v", secName, err) - } - - if byteOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff) - } - - records = append(records, extInfoRecord{uint64(byteOff), buf}) - } - - result[secName] = extInfo{ - bo, - recordSize, - records, - } - } -} - -// bpfCoreRelo matches `struct bpf_core_relo` from the kernel -type bpfCoreRelo struct { - InsnOff uint32 - TypeID TypeID - AccessStrOff uint32 - Kind COREKind -} - -type coreRelo struct { - insnOff uint32 - typeID TypeID - accessor coreAccessor - kind COREKind -} - -type coreRelos []coreRelo - -// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted -// by offset. -func (r coreRelos) append(other coreRelos, offset uint64) coreRelos { - result := make([]coreRelo, 0, len(r)+len(other)) - result = append(result, r...) 
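// Worked example for extInfo.MarshalBinary above: the ELF records ext-info
// offsets in bytes, while the kernel expects raw instruction counts, so a
// record at byte offset 24 is written out as instruction offset 24/8 = 3.
package main

import "fmt"

const instructionSize = 8 // mirrors asm.InstructionSize

func main() {
	byteOff := uint64(24)
	fmt.Println(byteOff / instructionSize) // 3
}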
- for _, relo := range other { - relo.insnOff += uint32(offset) - result = append(result, relo) - } - return result -} - -var extInfoReloSize = binary.Size(bpfCoreRelo{}) - -func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) { - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("read record size: %v", err) - } - - if recordSize != uint32(extInfoReloSize) { - return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) - } - - result := make(map[string]coreRelos) - for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - - var relos coreRelos - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var relo bpfCoreRelo - if err := binary.Read(r, bo, &relo); err != nil { - return nil, fmt.Errorf("section %v: read record: %v", secName, err) - } - - if relo.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff) - } - - accessorStr, err := strings.Lookup(relo.AccessStrOff) - if err != nil { - return nil, err - } - - accessor, err := parseCoreAccessor(accessorStr) - if err != nil { - return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) - } - - relos = append(relos, coreRelo{ - relo.InsnOff, - relo.TypeID, - accessor, - relo.Kind, - }) - } - - result[secName] = relos - } -} - -func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { - var infoHeader btfExtInfoSec - if err := binary.Read(r, bo, &infoHeader); err != nil { - return "", nil, fmt.Errorf("read ext info header: %w", err) - } - - secName, err := strings.Lookup(infoHeader.SecNameOff) - if err != nil { - return "", nil, fmt.Errorf("get section name: %w", err) - } - - if infoHeader.NumInfo == 0 { - return "", nil, fmt.Errorf("section %s has zero records", secName) - } - - return secName, &infoHeader, nil -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go deleted file mode 100644 index 220b285afe00..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package btf - -import ( - "bytes" - "encoding/binary" - - "github.com/cilium/ebpf/internal" -) - -func FuzzSpec(data []byte) int { - if len(data) < binary.Size(btfHeader{}) { - return -1 - } - - spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} - -func FuzzExtInfo(data []byte) int { - if len(data) < binary.Size(btfExtHeader{}) { - return -1 - } - - table := stringTable("\x00foo\x00barfoo\x00") - info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table) - if err != nil { - if info != nil { - panic("info is not nil") - } - return 0 - } - if info == nil { - panic("info is nil") - } - return 1 -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/info.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/info.go deleted file mode 100644 index 6a9b5d2e0ba3..000000000000 --- 
a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/info.go +++ /dev/null @@ -1,48 +0,0 @@ -package btf - -import ( - "bytes" - - "github.com/cilium/ebpf/internal" -) - -// info describes a BTF object. -type info struct { - BTF *Spec - ID ID - // Name is an identifying name for the BTF, currently only used by the - // kernel. - Name string - // KernelBTF is true if the BTf originated with the kernel and not - // userspace. - KernelBTF bool -} - -func newInfoFromFd(fd *internal.FD) (*info, error) { - // We invoke the syscall once with a empty BTF and name buffers to get size - // information to allocate buffers. Then we invoke it a second time with - // buffers to receive the data. - bpfInfo, err := bpfGetBTFInfoByFD(fd, nil, nil) - if err != nil { - return nil, err - } - - btfBuffer := make([]byte, bpfInfo.btfSize) - nameBuffer := make([]byte, bpfInfo.nameLen) - bpfInfo, err = bpfGetBTFInfoByFD(fd, btfBuffer, nameBuffer) - if err != nil { - return nil, err - } - - spec, err := loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil) - if err != nil { - return nil, err - } - - return &info{ - BTF: spec, - ID: ID(bpfInfo.id), - Name: internal.CString(nameBuffer), - KernelBTF: bpfInfo.kernelBTF != 0, - }, nil -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/strings.go deleted file mode 100644 index 9876aa227c0a..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/strings.go +++ /dev/null @@ -1,54 +0,0 @@ -package btf - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type stringTable []byte - -func readStringTable(r io.Reader) (stringTable, error) { - contents, err := io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("can't read string table: %v", err) - } - - if len(contents) < 1 { - return nil, errors.New("string table is empty") - } - - if contents[0] != '\x00' { - return nil, errors.New("first item in string table is non-empty") - } - - if contents[len(contents)-1] != '\x00' { - return nil, errors.New("string table isn't null terminated") - } - - return stringTable(contents), nil -} - -func (st stringTable) Lookup(offset uint32) (string, error) { - if int64(offset) > int64(^uint(0)>>1) { - return "", fmt.Errorf("offset %d overflows int", offset) - } - - pos := int(offset) - if pos >= len(st) { - return "", fmt.Errorf("offset %d is out of bounds", offset) - } - - if pos > 0 && st[pos-1] != '\x00' { - return "", fmt.Errorf("offset %d isn't start of a string", offset) - } - - str := st[pos:] - end := bytes.IndexByte(str, '\x00') - if end == -1 { - return "", fmt.Errorf("offset %d isn't null terminated", offset) - } - - return string(str[:end]), nil -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go deleted file mode 100644 index a4f80abd011f..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go +++ /dev/null @@ -1,31 +0,0 @@ -package btf - -import ( - "fmt" - "unsafe" - - "github.com/cilium/ebpf/internal" -) - -type bpfBTFInfo struct { - btf internal.Pointer - btfSize uint32 - id uint32 - name internal.Pointer - nameLen uint32 - kernelBTF uint32 -} - -func bpfGetBTFInfoByFD(fd *internal.FD, btf, name []byte) (*bpfBTFInfo, error) { - info := bpfBTFInfo{ - btf: internal.NewSlicePointer(btf), - btfSize: 
uint32(len(btf)), - name: internal.NewSlicePointer(name), - nameLen: uint32(len(name)), - } - if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { - return nil, fmt.Errorf("can't get program info: %w", err) - } - - return &info, nil -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/types.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/types.go deleted file mode 100644 index 5c8e7c6e59da..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/btf/types.go +++ /dev/null @@ -1,957 +0,0 @@ -package btf - -import ( - "fmt" - "math" - "strings" -) - -const maxTypeDepth = 32 - -// TypeID identifies a type in a BTF section. -type TypeID uint32 - -// ID implements part of the Type interface. -func (tid TypeID) ID() TypeID { - return tid -} - -// Type represents a type described by BTF. -type Type interface { - ID() TypeID - - String() string - - // Make a copy of the type, without copying Type members. - copy() Type - - // Enumerate all nested Types. Repeated calls must visit nested - // types in the same order. - walk(*typeDeque) -} - -// NamedType is a type with a name. -type NamedType interface { - Type - - // Name of the type, empty for anonymous types. - TypeName() string -} - -var ( - _ NamedType = (*Int)(nil) - _ NamedType = (*Struct)(nil) - _ NamedType = (*Union)(nil) - _ NamedType = (*Enum)(nil) - _ NamedType = (*Fwd)(nil) - _ NamedType = (*Func)(nil) - _ NamedType = (*Typedef)(nil) - _ NamedType = (*Var)(nil) - _ NamedType = (*Datasec)(nil) - _ NamedType = (*Float)(nil) -) - -// Void is the unit type of BTF. -type Void struct{} - -func (v *Void) ID() TypeID { return 0 } -func (v *Void) String() string { return "void#0" } -func (v *Void) size() uint32 { return 0 } -func (v *Void) copy() Type { return (*Void)(nil) } -func (v *Void) walk(*typeDeque) {} - -type IntEncoding byte - -const ( - Signed IntEncoding = 1 << iota - Char - Bool -) - -// Int is an integer of a given length. -type Int struct { - TypeID - Name string - - // The size of the integer in bytes. - Size uint32 - Encoding IntEncoding - // OffsetBits is the starting bit offset. Currently always 0. - // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int - OffsetBits uint32 - Bits byte -} - -func (i *Int) String() string { - var s strings.Builder - - switch { - case i.Encoding&Char != 0: - s.WriteString("char") - case i.Encoding&Bool != 0: - s.WriteString("bool") - default: - if i.Encoding&Signed == 0 { - s.WriteRune('u') - } - s.WriteString("int") - fmt.Fprintf(&s, "%d", i.Size*8) - } - - fmt.Fprintf(&s, "#%d", i.TypeID) - - if i.Bits > 0 { - fmt.Fprintf(&s, "[bits=%d]", i.Bits) - } - - return s.String() -} - -func (i *Int) TypeName() string { return i.Name } -func (i *Int) size() uint32 { return i.Size } -func (i *Int) walk(*typeDeque) {} -func (i *Int) copy() Type { - cpy := *i - return &cpy -} - -func (i *Int) isBitfield() bool { - return i.OffsetBits > 0 -} - -// Pointer is a pointer to another type. -type Pointer struct { - TypeID - Target Type -} - -func (p *Pointer) String() string { - return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) -} - -func (p *Pointer) size() uint32 { return 8 } -func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } -func (p *Pointer) copy() Type { - cpy := *p - return &cpy -} - -// Array is an array with a fixed number of elements. 
-type Array struct { - TypeID - Type Type - Nelems uint32 -} - -func (arr *Array) String() string { - return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) -} - -func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } -func (arr *Array) copy() Type { - cpy := *arr - return &cpy -} - -// Struct is a compound type of consecutive members. -type Struct struct { - TypeID - Name string - // The size of the struct including padding, in bytes - Size uint32 - Members []Member -} - -func (s *Struct) String() string { - return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name) -} - -func (s *Struct) TypeName() string { return s.Name } - -func (s *Struct) size() uint32 { return s.Size } - -func (s *Struct) walk(tdq *typeDeque) { - for i := range s.Members { - tdq.push(&s.Members[i].Type) - } -} - -func (s *Struct) copy() Type { - cpy := *s - cpy.Members = copyMembers(s.Members) - return &cpy -} - -func (s *Struct) members() []Member { - return s.Members -} - -// Union is a compound type where members occupy the same memory. -type Union struct { - TypeID - Name string - // The size of the union including padding, in bytes. - Size uint32 - Members []Member -} - -func (u *Union) String() string { - return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name) -} - -func (u *Union) TypeName() string { return u.Name } - -func (u *Union) size() uint32 { return u.Size } - -func (u *Union) walk(tdq *typeDeque) { - for i := range u.Members { - tdq.push(&u.Members[i].Type) - } -} - -func (u *Union) copy() Type { - cpy := *u - cpy.Members = copyMembers(u.Members) - return &cpy -} - -func (u *Union) members() []Member { - return u.Members -} - -func copyMembers(orig []Member) []Member { - cpy := make([]Member, len(orig)) - copy(cpy, orig) - return cpy -} - -type composite interface { - members() []Member -} - -var ( - _ composite = (*Struct)(nil) - _ composite = (*Union)(nil) -) - -// Member is part of a Struct or Union. -// -// It is not a valid Type. -type Member struct { - Name string - Type Type - // OffsetBits is the bit offset of this member. - OffsetBits uint32 - BitfieldSize uint32 -} - -// Enum lists possible values. -type Enum struct { - TypeID - Name string - Values []EnumValue -} - -func (e *Enum) String() string { - return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name) -} - -func (e *Enum) TypeName() string { return e.Name } - -// EnumValue is part of an Enum -// -// Is is not a valid Type -type EnumValue struct { - Name string - Value int32 -} - -func (e *Enum) size() uint32 { return 4 } -func (e *Enum) walk(*typeDeque) {} -func (e *Enum) copy() Type { - cpy := *e - cpy.Values = make([]EnumValue, len(e.Values)) - copy(cpy.Values, e.Values) - return &cpy -} - -// FwdKind is the type of forward declaration. -type FwdKind int - -// Valid types of forward declaration. -const ( - FwdStruct FwdKind = iota - FwdUnion -) - -func (fk FwdKind) String() string { - switch fk { - case FwdStruct: - return "struct" - case FwdUnion: - return "union" - default: - return fmt.Sprintf("%T(%d)", fk, int(fk)) - } -} - -// Fwd is a forward declaration of a Type. -type Fwd struct { - TypeID - Name string - Kind FwdKind -} - -func (f *Fwd) String() string { - return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name) -} - -func (f *Fwd) TypeName() string { return f.Name } - -func (f *Fwd) walk(*typeDeque) {} -func (f *Fwd) copy() Type { - cpy := *f - return &cpy -} - -// Typedef is an alias of a Type. 
-type Typedef struct { - TypeID - Name string - Type Type -} - -func (td *Typedef) String() string { - return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID()) -} - -func (td *Typedef) TypeName() string { return td.Name } - -func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } -func (td *Typedef) copy() Type { - cpy := *td - return &cpy -} - -// Volatile is a qualifier. -type Volatile struct { - TypeID - Type Type -} - -func (v *Volatile) String() string { - return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID()) -} - -func (v *Volatile) qualify() Type { return v.Type } -func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Volatile) copy() Type { - cpy := *v - return &cpy -} - -// Const is a qualifier. -type Const struct { - TypeID - Type Type -} - -func (c *Const) String() string { - return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID()) -} - -func (c *Const) qualify() Type { return c.Type } -func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } -func (c *Const) copy() Type { - cpy := *c - return &cpy -} - -// Restrict is a qualifier. -type Restrict struct { - TypeID - Type Type -} - -func (r *Restrict) String() string { - return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID()) -} - -func (r *Restrict) qualify() Type { return r.Type } -func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } -func (r *Restrict) copy() Type { - cpy := *r - return &cpy -} - -// Func is a function definition. -type Func struct { - TypeID - Name string - Type Type - Linkage FuncLinkage -} - -func (f *Func) String() string { - return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID()) -} - -func (f *Func) TypeName() string { return f.Name } - -func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } -func (f *Func) copy() Type { - cpy := *f - return &cpy -} - -// FuncProto is a function declaration. -type FuncProto struct { - TypeID - Return Type - Params []FuncParam -} - -func (fp *FuncProto) String() string { - var s strings.Builder - fmt.Fprintf(&s, "proto#%d[", fp.TypeID) - for _, param := range fp.Params { - fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID()) - } - fmt.Fprintf(&s, "return=#%d]", fp.Return.ID()) - return s.String() -} - -func (fp *FuncProto) walk(tdq *typeDeque) { - tdq.push(&fp.Return) - for i := range fp.Params { - tdq.push(&fp.Params[i].Type) - } -} - -func (fp *FuncProto) copy() Type { - cpy := *fp - cpy.Params = make([]FuncParam, len(fp.Params)) - copy(cpy.Params, fp.Params) - return &cpy -} - -type FuncParam struct { - Name string - Type Type -} - -// Var is a global variable. -type Var struct { - TypeID - Name string - Type Type - Linkage VarLinkage -} - -func (v *Var) String() string { - return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name) -} - -func (v *Var) TypeName() string { return v.Name } - -func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Var) copy() Type { - cpy := *v - return &cpy -} - -// Datasec is a global program section containing data. 
-type Datasec struct { - TypeID - Name string - Size uint32 - Vars []VarSecinfo -} - -func (ds *Datasec) String() string { - return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name) -} - -func (ds *Datasec) TypeName() string { return ds.Name } - -func (ds *Datasec) size() uint32 { return ds.Size } - -func (ds *Datasec) walk(tdq *typeDeque) { - for i := range ds.Vars { - tdq.push(&ds.Vars[i].Type) - } -} - -func (ds *Datasec) copy() Type { - cpy := *ds - cpy.Vars = make([]VarSecinfo, len(ds.Vars)) - copy(cpy.Vars, ds.Vars) - return &cpy -} - -// VarSecinfo describes variable in a Datasec. -// -// It is not a valid Type. -type VarSecinfo struct { - Type Type - Offset uint32 - Size uint32 -} - -// Float is a float of a given length. -type Float struct { - TypeID - Name string - - // The size of the float in bytes. - Size uint32 -} - -func (f *Float) String() string { - return fmt.Sprintf("float%d#%d[%q]", f.Size*8, f.TypeID, f.Name) -} - -func (f *Float) TypeName() string { return f.Name } -func (f *Float) size() uint32 { return f.Size } -func (f *Float) walk(*typeDeque) {} -func (f *Float) copy() Type { - cpy := *f - return &cpy -} - -type sizer interface { - size() uint32 -} - -var ( - _ sizer = (*Int)(nil) - _ sizer = (*Pointer)(nil) - _ sizer = (*Struct)(nil) - _ sizer = (*Union)(nil) - _ sizer = (*Enum)(nil) - _ sizer = (*Datasec)(nil) -) - -type qualifier interface { - qualify() Type -} - -var ( - _ qualifier = (*Const)(nil) - _ qualifier = (*Restrict)(nil) - _ qualifier = (*Volatile)(nil) -) - -// Sizeof returns the size of a type in bytes. -// -// Returns an error if the size can't be computed. -func Sizeof(typ Type) (int, error) { - var ( - n = int64(1) - elem int64 - ) - - for i := 0; i < maxTypeDepth; i++ { - switch v := typ.(type) { - case *Array: - if n > 0 && int64(v.Nelems) > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - // Arrays may be of zero length, which allows - // n to be zero as well. - n *= int64(v.Nelems) - typ = v.Type - continue - - case sizer: - elem = int64(v.size()) - - case *Typedef: - typ = v.Type - continue - - case qualifier: - typ = v.qualify() - continue - - default: - return 0, fmt.Errorf("unsized type %T", typ) - } - - if n > 0 && elem > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - size := n * elem - if int64(int(size)) != size { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - return int(size), nil - } - - return 0, fmt.Errorf("type %s: exceeded type depth", typ) -} - -// copy a Type recursively. -// -// typ may form a cycle. -// -// Returns any errors from transform verbatim. -func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { - copies := make(copier) - return typ, copies.copy(&typ, transform) -} - -// copy a slice of Types recursively. -// -// Types may form a cycle. -// -// Returns any errors from transform verbatim. -func copyTypes(types []Type, transform func(Type) (Type, error)) ([]Type, error) { - result := make([]Type, len(types)) - copy(result, types) - - copies := make(copier) - for i := range result { - if err := copies.copy(&result[i], transform); err != nil { - return nil, err - } - } - - return result, nil -} - -type copier map[Type]Type - -func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error { - var work typeDeque - for t := typ; t != nil; t = work.pop() { - // *t is the identity of the type. 
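	// Editor's note — an illustrative sketch, not part of the vendored file:
	// because copies are memoized by the identity of the original type, cyclic
	// type graphs terminate. Assuming a *Pointer that targets itself:
	//
	//	p := &Pointer{}
	//	p.Target = p
	//	var typ Type = p
	//	copies := make(copier)
	//	_ = copies.copy(&typ, nil)
	//	// typ is now a fresh *Pointer whose Target is typ itself; the
	//	// second visit hits the memo map above instead of recursing forever.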
- if cpy := c[*t]; cpy != nil { - *t = cpy - continue - } - - var cpy Type - if transform != nil { - tf, err := transform(*t) - if err != nil { - return fmt.Errorf("copy %s: %w", *t, err) - } - cpy = tf.copy() - } else { - cpy = (*t).copy() - } - - c[*t] = cpy - *t = cpy - - // Mark any nested types for copying. - cpy.walk(&work) - } - - return nil -} - -// typeDeque keeps track of pointers to types which still -// need to be visited. -type typeDeque struct { - types []*Type - read, write uint64 - mask uint64 -} - -func (dq *typeDeque) empty() bool { - return dq.read == dq.write -} - -// push adds a type to the stack. -func (dq *typeDeque) push(t *Type) { - if dq.write-dq.read < uint64(len(dq.types)) { - dq.types[dq.write&dq.mask] = t - dq.write++ - return - } - - new := len(dq.types) * 2 - if new == 0 { - new = 8 - } - - types := make([]*Type, new) - pivot := dq.read & dq.mask - n := copy(types, dq.types[pivot:]) - n += copy(types[n:], dq.types[:pivot]) - types[n] = t - - dq.types = types - dq.mask = uint64(new) - 1 - dq.read, dq.write = 0, uint64(n+1) -} - -// shift returns the first element or null. -func (dq *typeDeque) shift() *Type { - if dq.empty() { - return nil - } - - index := dq.read & dq.mask - t := dq.types[index] - dq.types[index] = nil - dq.read++ - return t -} - -// pop returns the last element or null. -func (dq *typeDeque) pop() *Type { - if dq.empty() { - return nil - } - - dq.write-- - index := dq.write & dq.mask - t := dq.types[index] - dq.types[index] = nil - return t -} - -// all returns all elements. -// -// The deque is empty after calling this method. -func (dq *typeDeque) all() []*Type { - length := dq.write - dq.read - types := make([]*Type, 0, length) - for t := dq.shift(); t != nil; t = dq.shift() { - types = append(types, t) - } - return types -} - -// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns -// it into a graph of Types connected via pointers. -// -// Returns a map of named types (so, where NameOff is non-zero) and a slice of types -// indexed by TypeID. Since BTF ignores compilation units, multiple types may share -// the same name. A Type may form a cyclic graph by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]NamedType, err error) { - type fixupDef struct { - id TypeID - expectedKind btfKind - typ *Type - } - - var fixups []fixupDef - fixup := func(id TypeID, expectedKind btfKind, typ *Type) { - fixups = append(fixups, fixupDef{id, expectedKind, typ}) - } - - convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { - // NB: The fixup below relies on pre-allocating this array to - // work, since otherwise append might re-allocate members. - members := make([]Member, 0, len(raw)) - for i, btfMember := range raw { - name, err := rawStrings.Lookup(btfMember.NameOff) - if err != nil { - return nil, fmt.Errorf("can't get name for member %d: %w", i, err) - } - m := Member{ - Name: name, - OffsetBits: btfMember.Offset, - } - if kindFlag { - m.BitfieldSize = btfMember.Offset >> 24 - m.OffsetBits &= 0xffffff - } - members = append(members, m) - } - for i := range members { - fixup(raw[i].Type, kindUnknown, &members[i].Type) - } - return members, nil - } - - types = make([]Type, 0, len(rawTypes)) - types = append(types, (*Void)(nil)) - namedTypes = make(map[string][]NamedType) - - for i, raw := range rawTypes { - var ( - // Void is defined to always be type ID 0, and is thus - // omitted from BTF. 
- id = TypeID(i + 1) - typ Type - ) - - name, err := rawStrings.Lookup(raw.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) - } - - switch raw.Kind() { - case kindInt: - encoding, offset, bits := intEncoding(*raw.data.(*uint32)) - typ = &Int{id, name, raw.Size(), encoding, offset, bits} - - case kindPointer: - ptr := &Pointer{id, nil} - fixup(raw.Type(), kindUnknown, &ptr.Target) - typ = ptr - - case kindArray: - btfArr := raw.data.(*btfArray) - - // IndexType is unused according to btf.rst. - // Don't make it available right now. - arr := &Array{id, nil, btfArr.Nelems} - fixup(btfArr.Type, kindUnknown, &arr.Type) - typ = arr - - case kindStruct: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) - } - typ = &Struct{id, name, raw.Size(), members} - - case kindUnion: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err) - } - typ = &Union{id, name, raw.Size(), members} - - case kindEnum: - rawvals := raw.data.([]btfEnum) - vals := make([]EnumValue, 0, len(rawvals)) - for i, btfVal := range rawvals { - name, err := rawStrings.Lookup(btfVal.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) - } - vals = append(vals, EnumValue{ - Name: name, - Value: btfVal.Val, - }) - } - typ = &Enum{id, name, vals} - - case kindForward: - if raw.KindFlag() { - typ = &Fwd{id, name, FwdUnion} - } else { - typ = &Fwd{id, name, FwdStruct} - } - - case kindTypedef: - typedef := &Typedef{id, name, nil} - fixup(raw.Type(), kindUnknown, &typedef.Type) - typ = typedef - - case kindVolatile: - volatile := &Volatile{id, nil} - fixup(raw.Type(), kindUnknown, &volatile.Type) - typ = volatile - - case kindConst: - cnst := &Const{id, nil} - fixup(raw.Type(), kindUnknown, &cnst.Type) - typ = cnst - - case kindRestrict: - restrict := &Restrict{id, nil} - fixup(raw.Type(), kindUnknown, &restrict.Type) - typ = restrict - - case kindFunc: - fn := &Func{id, name, nil, raw.Linkage()} - fixup(raw.Type(), kindFuncProto, &fn.Type) - typ = fn - - case kindFuncProto: - rawparams := raw.data.([]btfParam) - params := make([]FuncParam, 0, len(rawparams)) - for i, param := range rawparams { - name, err := rawStrings.Lookup(param.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) - } - params = append(params, FuncParam{ - Name: name, - }) - } - for i := range params { - fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type) - } - - fp := &FuncProto{id, nil, params} - fixup(raw.Type(), kindUnknown, &fp.Return) - typ = fp - - case kindVar: - variable := raw.data.(*btfVariable) - v := &Var{id, name, nil, VarLinkage(variable.Linkage)} - fixup(raw.Type(), kindUnknown, &v.Type) - typ = v - - case kindDatasec: - btfVars := raw.data.([]btfVarSecinfo) - vars := make([]VarSecinfo, 0, len(btfVars)) - for _, btfVar := range btfVars { - vars = append(vars, VarSecinfo{ - Offset: btfVar.Offset, - Size: btfVar.Size, - }) - } - for i := range vars { - fixup(btfVars[i].Type, kindVar, &vars[i].Type) - } - typ = &Datasec{id, name, raw.SizeType, vars} - - case kindFloat: - typ = &Float{id, name, raw.Size()} - - default: - return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) - } - - types = append(types, typ) - - if named, ok := typ.(NamedType); ok { - if name := 
essentialName(named.TypeName()); name != "" { - namedTypes[name] = append(namedTypes[name], named) - } - } - } - - for _, fixup := range fixups { - i := int(fixup.id) - if i >= len(types) { - return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) - } - - // Default void (id 0) to unknown - rawKind := kindUnknown - if i > 0 { - rawKind = rawTypes[i-1].Kind() - } - - if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected { - return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind) - } - - *fixup.typ = types[i] - } - - return types, namedTypes, nil -} - -// essentialName returns name without a ___ suffix. -func essentialName(name string) string { - lastIdx := strings.LastIndex(name, "___") - if lastIdx > 0 { - return name[:lastIdx] - } - return name -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/elf.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/elf.go index 54a4313130a4..011581938d99 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/elf.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/elf.go @@ -35,6 +35,29 @@ func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { return &SafeELFFile{file}, nil } +// OpenSafeELFFile reads an ELF from a file. +// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. +func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + // Symbols is the safe version of elf.File.Symbols. func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { defer func() { @@ -66,3 +89,14 @@ func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { syms, err = se.File.DynamicSymbols() return } + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go deleted file mode 100644 index 6ae99fcd5f3b..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian.go +++ /dev/null @@ -1,29 +0,0 @@ -package internal - -import ( - "encoding/binary" - "unsafe" -) - -// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, -// depending on the host's endianness. -var NativeEndian binary.ByteOrder - -// Clang is set to either "el" or "eb" depending on the host's endianness. 
-var ClangEndian string - -func init() { - if isBigEndian() { - NativeEndian = binary.BigEndian - ClangEndian = "eb" - } else { - NativeEndian = binary.LittleEndian - ClangEndian = "el" - } -} - -func isBigEndian() (ret bool) { - i := int(0x1) - bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i)) - return bs[0] == 0 -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian_be.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 000000000000..ad33cda8511b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,13 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 +// +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian binary.ByteOrder = binary.BigEndian + +// ClangEndian is set to either "el" or "eb" depending on the host's endianness. +const ClangEndian = "eb" diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian_le.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 000000000000..41a68224c833 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,13 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 +// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian binary.ByteOrder = binary.LittleEndian + +// ClangEndian is set to either "el" or "eb" depending on the host's endianness. +const ClangEndian = "el" diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/errors.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/errors.go index 877bd72ee266..b5ccdd7d0531 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/errors.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/errors.go @@ -2,50 +2,205 @@ package internal import ( "bytes" - "errors" "fmt" + "io" "strings" - - "github.com/cilium/ebpf/internal/unix" ) -// ErrorWithLog returns an error that includes logs from the -// kernel verifier. +// ErrorWithLog returns an error which includes logs from the kernel verifier. +// +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. // -// logErr should be the error returned by the syscall that generated -// the log. It is used to check for truncation of the output. -func ErrorWithLog(err error, log []byte, logErr error) error { - logStr := strings.Trim(CString(log), "\t\r\n ") - if errors.Is(logErr, unix.ENOSPC) { - logStr += " (truncated...)" +// A set of heuristics is used to determine whether the log has been truncated. +func ErrorWithLog(err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. 
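	// Editor's note (illustrative example, not upstream code): a buffer of
	// []byte("R0 !read_ok\n\x00\x00") yields the single line "R0 !read_ok"
	// with truncated == false, while a buffer whose terminating zero is the
	// very last byte and is not preceded by '\n' is flagged as truncated.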
+ truncated := false + if i := bytes.IndexByte(log, 0); i != -1 { + if i == len(log)-1 && !bytes.HasSuffix(log[:i], []byte{'\n'}) { + // The null byte is at the end of the buffer and it's not preceded + // by a newline character. Most likely the buffer was too short. + truncated = true + } + + log = log[:i] + } else if len(log) > 0 { + // No null byte? Dodgy! + truncated = true + } + + log = bytes.Trim(log, whitespace) + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. + lines = append(lines, string(bytes.TrimRight(line, whitespace))) } - return &VerifierError{err, logStr} + return &VerifierError{err, lines, truncated} } // VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. type VerifierError struct { - cause error - log string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string + // Whether the log output is truncated, based on several heuristics. + Truncated bool } func (le *VerifierError) Unwrap() error { - return le.cause + return le.Cause } func (le *VerifierError) Error() string { - if le.log == "" { - return le.cause.Error() + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. + log = log[:n-1] + } + + n := len(log) + if n == 0 { + return le.Cause.Error() + } + + lines := log[n-1:] + if n >= 2 && (includePreviousLine(log[n-1]) || le.Truncated) { + // Add one more line of context if it aids understanding the error. + lines = log[n-2:] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: ", le.Cause.Error()) + + for i, line := range lines { + b.WriteString(strings.TrimSpace(line)) + if i != len(lines)-1 { + b.WriteString(": ") + } + } + + omitted := len(le.Log) - len(lines) + if omitted == 0 && !le.Truncated { + return b.String() + } + + b.WriteString(" (") + if le.Truncated { + b.WriteString("truncated") + } + + if omitted > 0 { + if le.Truncated { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%d line(s) omitted", omitted) + } + b.WriteString(")") + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. + + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true } - return fmt.Sprintf("%s: %s", le.cause, le.log) + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true + } + + return false } -// CString turns a NUL / zero terminated byte buffer into a string. -func CString(in []byte) string { - inLen := bytes.IndexByte(in, 0) - if inLen == -1 { - return "" +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). 
%v +// allows outputting additional information using the following flags: +// +// + Output the first lines, or all lines if no width is given. +// - Output the last lines, or all lines if no width is given. +// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s:", le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. + lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + if le.Truncated { + fmt.Fprintf(f, "\n\t(truncated)") + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) } - return string(in[:inLen]) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/fd.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/fd.go deleted file mode 100644 index af04955bd531..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/fd.go +++ /dev/null @@ -1,69 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "github.com/cilium/ebpf/internal/unix" -) - -var ErrClosedFd = errors.New("use of closed file descriptor") - -type FD struct { - raw int64 -} - -func NewFD(value uint32) *FD { - fd := &FD{int64(value)} - runtime.SetFinalizer(fd, (*FD).Close) - return fd -} - -func (fd *FD) String() string { - return strconv.FormatInt(fd.raw, 10) -} - -func (fd *FD) Value() (uint32, error) { - if fd.raw < 0 { - return 0, ErrClosedFd - } - - return uint32(fd.raw), nil -} - -func (fd *FD) Close() error { - if fd.raw < 0 { - return nil - } - - value := int(fd.raw) - fd.raw = -1 - - fd.Forget() - return unix.Close(value) -} - -func (fd *FD) Forget() { - runtime.SetFinalizer(fd, nil) -} - -func (fd *FD) Dup() (*FD, error) { - if fd.raw < 0 { - return nil, ErrClosedFd - } - - dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, fmt.Errorf("can't dup fd: %v", err) - } - - return NewFD(uint32(dup)), nil -} - -func (fd *FD) File(name string) *os.File { - fd.Forget() - return os.NewFile(uintptr(fd.raw), name) -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/feature.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/feature.go index c94a2e1ee018..0a6c2d1d528c 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/feature.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/feature.go @@ -54,11 +54,6 @@ type FeatureTestFn func() error // // Returns an error wrapping ErrNotSupported if the feature is not supported. 
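// Editor's sketch of assumed usage (the names below are hypothetical): the
// returned closure memoizes its result, so the kernel is probed at most once
// per feature.
//
//	var haveBatchAPI = FeatureTest("map batch api", "5.6", func() error {
//		// Try the operation; return ErrNotSupported if the kernel
//		// rejects it, nil if it succeeds.
//		return nil
//	})
//
//	if err := haveBatchAPI(); err != nil {
//		// Feature is missing, or the probe itself failed.
//	}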
func FeatureTest(name, version string, fn FeatureTestFn) func() error { - v, err := NewVersion(version) - if err != nil { - return func() error { return err } - } - ft := new(featureTest) return func() error { ft.RLock() @@ -79,6 +74,11 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error { err := fn() switch { case errors.Is(err, ErrNotSupported): + v, err := NewVersion(version) + if err != nil { + return err + } + ft.result = &UnsupportedFeatureError{ MinimumVersion: v, Name: name, diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/io.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/io.go index fa7402782d7a..30b6641f0761 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/io.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/io.go @@ -1,6 +1,35 @@ package internal -import "errors" +import ( + "bufio" + "compress/gzip" + "errors" + "io" + "os" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. +// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} // DiscardZeroes makes sure that all written bytes are zero // before discarding them. @@ -14,3 +43,20 @@ func (DiscardZeroes) Write(p []byte) (int, error) { } return len(p), nil } + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/output.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 000000000000..aeab37fcfafe --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,84 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. +func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. 
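			// Editor's illustration of the mapping rules:
			// Identifier("__x64_sys_recvfrom") yields "X64SysRecvfrom",
			// and Identifier("foo_bar") yields "FooBar".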
+ case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. + return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. +func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/pinning.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/pinning.go index 5329b432d72e..c711353c3ea5 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/pinning.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/pinning.go @@ -4,24 +4,54 @@ import ( "errors" "fmt" "os" + "path/filepath" + "runtime" + "unsafe" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) -func Pin(currentPath, newPath string, fd *FD) error { +func Pin(currentPath, newPath string, fd *sys.FD) error { + const bpfFSType = 0xcafe4a11 + if newPath == "" { return errors.New("given pinning path cannot be empty") } if currentPath == newPath { return nil } + + var statfs unix.Statfs_t + if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil { + return err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. + fsType = int64(uint32(statfs.Type)) + } + + if fsType != bpfFSType { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + if currentPath == "" { - return BPFObjPin(newPath, fd) + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) } - var err error + // Renameat2 is used instead of os.Rename to disallow the new path replacing // an existing path. - if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil { + err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { // Object is now moved to the new pinning path. return nil } @@ -29,7 +59,10 @@ func Pin(currentPath, newPath string, fd *FD) error { return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) } // Internal state not in sync with the file system so let's fix it. 
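	// Editor's note on the semantics above (illustrative; the bpffs paths
	// are examples only):
	//
	//	Pin("", "/sys/fs/bpf/my_map", fd)             // create a new pin
	//	Pin("/sys/fs/bpf/old", "/sys/fs/bpf/new", fd) // move; fails if new exists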
- return BPFObjPin(newPath, fd) + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) } func Unpin(pinnedPath string) error { diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 000000000000..dfe174448e1c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,6 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +// Regenerate types.go by invoking go generate in the current directory. + +//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 000000000000..65517d45e26a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,96 @@ +package sys + +import ( + "fmt" + "math" + "os" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +type FD struct { + raw int +} + +func newFD(value int) *FD { + fd := &FD{value} + runtime.SetFinalizer(fd, (*FD).Close) + return fd +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return fd.raw +} + +func (fd *FD) Uint() uint32 { + if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + // Best effort: this is the number most likely to be an invalid file + // descriptor. It is equal to -1 (on two's complement arches). + return math.MaxUint32 + } + return uint32(fd.raw) +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + value := int(fd.raw) + fd.raw = -1 + + fd.Forget() + return unix.Close(value) +} + +func (fd *FD) Forget() { + runtime.SetFinalizer(fd, nil) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". 
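	// Editor's note: passing 1 as the minimum below means the kernel returns
	// the lowest free descriptor >= 1, so the duplicate can never be fd 0,
	// which the BPF UAPI would read as "no fd supplied".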
+	dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1)
+	if err != nil {
+		return nil, fmt.Errorf("can't dup fd: %v", err)
+	}
+
+	return newFD(dup), nil
+}
+
+func (fd *FD) File(name string) *os.File {
+	fd.Forget()
+	return os.NewFile(uintptr(fd.raw), name)
+}
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
similarity index 71%
rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr.go
rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
index f295de72cfeb..a221006888d0 100644
--- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr.go
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
@@ -1,4 +1,4 @@
-package internal
+package sys
 
 import (
 	"unsafe"
@@ -20,6 +20,13 @@ func NewSlicePointer(buf []byte) Pointer {
 	return Pointer{ptr: unsafe.Pointer(&buf[0])}
 }
 
+// NewSlicePointerLen creates a 64-bit pointer from a byte slice.
+//
+// Useful to assign both the pointer and the length in one go.
+func NewSlicePointerLen(buf []byte) (Pointer, uint32) {
+	return NewSlicePointer(buf), uint32(len(buf))
+}
+
 // NewStringPointer creates a 64-bit pointer from a string.
 func NewStringPointer(str string) Pointer {
 	p, err := unix.BytePtrFromString(str)
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
similarity index 93%
rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
index 8c114ddf4763..df903d780b15 100644
--- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
@@ -1,7 +1,7 @@
 //go:build armbe || mips || mips64p32
 // +build armbe mips mips64p32
 
-package internal
+package sys
 
 import (
 	"unsafe"
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
similarity index 94%
rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
index e65a61e45d34..a6a51edb6e10 100644
--- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
@@ -1,7 +1,7 @@
 //go:build 386 || amd64p32 || arm || mipsle || mips64p32le
 // +build 386 amd64p32 arm mipsle mips64p32le
 
-package internal
+package sys
 
 import (
 	"unsafe"
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
similarity index 95%
rename from cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_64.go
rename to cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
index 71a3afe307b3..7c0279e487c7 100644
--- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/ptr_64.go
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
@@ -1,7 +1,7 @@
 //go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
 // +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32
 
-package internal
+package sys
 
 import (
 	"unsafe"
diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
new file mode 100644
index 000000000000..2a5935dc9123
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -0,0 +1,126 @@
+package sys
+
+import (
+	"runtime"
+	"syscall"
+	"unsafe"
+
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// BPF wraps SYS_BPF.
+//
+// Any pointers contained in attr must use the Pointer type from this package.
+func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
+	for {
+		r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
+		runtime.KeepAlive(attr)
+
+		// As of ~4.20 the verifier can be interrupted by a signal,
+		// and returns EAGAIN in that case.
+		if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD {
+			continue
+		}
+
+		var err error
+		if errNo != 0 {
+			err = wrappedErrno{errNo}
+		}
+
+		return r1, err
+	}
+}
+
+// Info is implemented by all structs that can be passed to the ObjInfo syscall.
+//
+//	MapInfo
+//	ProgInfo
+//	LinkInfo
+//	BtfInfo
+type Info interface {
+	info() (unsafe.Pointer, uint32)
+}
+
+var _ Info = (*MapInfo)(nil)
+
+func (i *MapInfo) info() (unsafe.Pointer, uint32) {
+	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*ProgInfo)(nil)
+
+func (i *ProgInfo) info() (unsafe.Pointer, uint32) {
+	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*LinkInfo)(nil)
+
+func (i *LinkInfo) info() (unsafe.Pointer, uint32) {
+	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*BtfInfo)(nil)
+
+func (i *BtfInfo) info() (unsafe.Pointer, uint32) {
+	return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+// ObjInfo retrieves information about a BPF Fd.
+//
+// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo.
+func ObjInfo(fd *FD, info Info) error {
+	ptr, len := info.info()
+	err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{
+		BpfFd:   fd.Uint(),
+		InfoLen: len,
+		Info:    NewPointer(ptr),
+	})
+	runtime.KeepAlive(fd)
+	return err
+}
+
+// ObjName is a null-terminated string made up of
+// 'A-Za-z0-9_' characters.
+type ObjName [unix.BPF_OBJ_NAME_LEN]byte
+
+// NewObjName truncates the result if it is too long.
+func NewObjName(name string) ObjName {
+	var result ObjName
+	copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
+	return result
+}
+
+// LinkID uniquely identifies a bpf_link.
+type LinkID uint32
+
+// BTFID uniquely identifies a BTF blob loaded into the kernel.
+type BTFID uint32
+
+// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
+// syscall.E* or unix.E* constants.
+//
+// You should never export an error of this type.
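// Editor's illustration (hypothetical caller, not part of the diff):
// wrapping keeps errors.Is working while defeating fragile equality checks.
//
//	_, err := BPF(BPF_PROG_LOAD, attr, size) // attr/size elided
//	_ = err == unix.EINVAL          // always false: err is a wrappedErrno
//	_ = errors.Is(err, unix.EINVAL) // true when the syscall returned EINVAL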
+type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +type syscallError struct { + error + errno syscall.Errno +} + +func Error(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/types.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 000000000000..291e3a6196cb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,1052 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. + +package sys + +import ( + "unsafe" +) + +type AdjRoomMode int32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType int32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + __MAX_BPF_ATTACH_TYPE AttachType = 43 +) + +type Cmd int32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 + BPF_MAP_LOOKUP_BATCH Cmd = 24 + 
BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 +) + +type FunctionId int32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + BPF_FUNC_l3_csum_replace FunctionId = 10 + BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId = 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + BPF_FUNC_probe_write_user FunctionId = 36 + BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + BPF_FUNC_perf_event_read_value FunctionId = 55 + BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + 
BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 
155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + __BPF_FUNC_MAX_ID FunctionId = 194 +) + +type HdrStartOff int32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType int32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + MAX_BPF_LINK_TYPE LinkType = 9 +) + +type MapType int32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 +) + +type ProgType int32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + 
BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 +) + +type RetCode int32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 +) + +type SkAction int32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus int32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType int32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type XdpAction int32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + Btf Pointer + BtfSize uint32 + Id BTFID + Name Pointer + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra [16]uint8 +} + +type MapInfo struct { + Type uint32 + Id uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + Name ObjName + Ifindex uint32 + BtfVmlinuxValueTypeId uint32 + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId uint32 + BtfValueTypeId uint32 + _ [4]byte + MapExtra uint64 +} + +type ProgInfo struct { + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns uint64 + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms uint64 + JitedFuncLens uint64 + BtfId uint32 + FuncInfoRecSize uint32 + FuncInfo uint64 + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo uint64 + JitedLineInfo uint64 + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 + VerifiedInsns uint32 + _ [4]byte +} + +type SkLookup struct { + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 
[16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type BtfGetFdByIdAttr struct{ Id uint32 } + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfGetNextIdAttr struct { + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BtfLoadAttr struct { + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + _ [4]byte +} + +func BtfLoad(attr *BtfLoadAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type EnableStatsAttr struct{ Type uint32 } + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId uint32 + _ [28]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [20]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [24]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkUpdateAttr struct { + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId uint32 + BtfValueTypeId uint32 + BtfVmlinuxValueTypeId uint32 + MapExtra uint64 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func 
MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct{ MapFd uint32 } + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct{ Id uint32 } + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct { + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr 
*ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgDetachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct{ Id uint32 } + +func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + ProgType ProgType + InsnCnt uint32 + Insns Pointer + License Pointer + LogLevel uint32 + LogSize uint32 + LogBuf Pointer + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBtfId uint32 + AttachProgFd uint32 + CoreReloCnt uint32 + FdArray Pointer + CoreRelos Pointer + CoreReloRecSize uint32 + _ [4]byte +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgRunAttr struct { + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn Pointer + DataOut Pointer + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn Pointer + CtxOut Pointer + Flags uint32 + Cpu uint32 + BatchSize uint32 + _ [4]byte +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type RawTracepointOpenAttr struct { + Name Pointer + ProgFd uint32 + _ [4]byte +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + CgroupId uint64 + AttachType AttachType + _ [4]byte +} + +type IterLinkInfo struct { + TargetName Pointer + TargetNameLen uint32 +} + +type NetNsLinkInfo struct { + NetnsIno uint32 + AttachType AttachType +} + +type RawTracepointLinkInfo struct { + TpName Pointer + TpNameLen uint32 + _ [4]byte +} + +type TracingLinkInfo struct { + AttachType AttachType + TargetObjId uint32 + TargetBtfId uint32 +} + +type XDPLinkInfo struct{ Ifindex uint32 } diff 
--git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/syscall.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/syscall.go deleted file mode 100644 index b75037bb9d68..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/syscall.go +++ /dev/null @@ -1,304 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "path/filepath" - "runtime" - "syscall" - "unsafe" - - "github.com/cilium/ebpf/internal/unix" -) - -//go:generate stringer -output syscall_string.go -type=BPFCmd - -// BPFCmd identifies a subcommand of the bpf syscall. -type BPFCmd int - -// Well known BPF commands. -const ( - BPF_MAP_CREATE BPFCmd = iota - BPF_MAP_LOOKUP_ELEM - BPF_MAP_UPDATE_ELEM - BPF_MAP_DELETE_ELEM - BPF_MAP_GET_NEXT_KEY - BPF_PROG_LOAD - BPF_OBJ_PIN - BPF_OBJ_GET - BPF_PROG_ATTACH - BPF_PROG_DETACH - BPF_PROG_TEST_RUN - BPF_PROG_GET_NEXT_ID - BPF_MAP_GET_NEXT_ID - BPF_PROG_GET_FD_BY_ID - BPF_MAP_GET_FD_BY_ID - BPF_OBJ_GET_INFO_BY_FD - BPF_PROG_QUERY - BPF_RAW_TRACEPOINT_OPEN - BPF_BTF_LOAD - BPF_BTF_GET_FD_BY_ID - BPF_TASK_FD_QUERY - BPF_MAP_LOOKUP_AND_DELETE_ELEM - BPF_MAP_FREEZE - BPF_BTF_GET_NEXT_ID - BPF_MAP_LOOKUP_BATCH - BPF_MAP_LOOKUP_AND_DELETE_BATCH - BPF_MAP_UPDATE_BATCH - BPF_MAP_DELETE_BATCH - BPF_LINK_CREATE - BPF_LINK_UPDATE - BPF_LINK_GET_FD_BY_ID - BPF_LINK_GET_NEXT_ID - BPF_ENABLE_STATS - BPF_ITER_CREATE -) - -// BPF wraps SYS_BPF. -// -// Any pointers contained in attr must use the Pointer type from this package. -func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { - r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) - runtime.KeepAlive(attr) - - var err error - if errNo != 0 { - err = wrappedErrno{errNo} - } - - return r1, err -} - -type BPFProgLoadAttr struct { - ProgType uint32 - InsCount uint32 - Instructions Pointer - License Pointer - LogLevel uint32 - LogSize uint32 - LogBuf Pointer - KernelVersion uint32 // since 4.1 2541517c32be - ProgFlags uint32 // since 4.11 e07b98d9bffe - ProgName BPFObjName // since 4.15 067cae47771c - ProgIfIndex uint32 // since 4.15 1f6f4cb7ba21 - ExpectedAttachType uint32 // since 4.17 5e43f899b03a - ProgBTFFd uint32 - FuncInfoRecSize uint32 - FuncInfo Pointer - FuncInfoCnt uint32 - LineInfoRecSize uint32 - LineInfo Pointer - LineInfoCnt uint32 - AttachBTFID uint32 - AttachProgFd uint32 -} - -// BPFProgLoad wraps BPF_PROG_LOAD. -func BPFProgLoad(attr *BPFProgLoadAttr) (*FD, error) { - for { - fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. 
- if errors.Is(err, unix.EAGAIN) { - continue - } - - if err != nil { - return nil, err - } - - return NewFD(uint32(fd)), nil - } -} - -type BPFProgAttachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 - AttachFlags uint32 - ReplaceBpfFd uint32 -} - -func BPFProgAttach(attr *BPFProgAttachAttr) error { - _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFProgDetachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 -} - -func BPFProgDetach(attr *BPFProgDetachAttr) error { - _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFEnableStatsAttr struct { - StatsType uint32 -} - -func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) { - ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, fmt.Errorf("enable stats: %w", err) - } - return NewFD(uint32(ptr)), nil - -} - -type bpfObjAttr struct { - fileName Pointer - fd uint32 - fileFlags uint32 -} - -const bpfFSType = 0xcafe4a11 - -// BPFObjPin wraps BPF_OBJ_PIN. -func BPFObjPin(fileName string, fd *FD) error { - dirName := filepath.Dir(fileName) - var statfs unix.Statfs_t - if err := unix.Statfs(dirName, &statfs); err != nil { - return err - } - if uint64(statfs.Type) != bpfFSType { - return fmt.Errorf("%s is not on a bpf filesystem", fileName) - } - - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fd: value, - } - _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("pin object %s: %w", fileName, err) - } - return nil -} - -// BPFObjGet wraps BPF_OBJ_GET. -func BPFObjGet(fileName string, flags uint32) (*FD, error) { - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fileFlags: flags, - } - ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return nil, fmt.Errorf("get object %s: %w", fileName, err) - } - return NewFD(uint32(ptr)), nil -} - -type bpfObjGetInfoByFDAttr struct { - fd uint32 - infoLen uint32 - info Pointer -} - -// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD. -// -// Available from 4.13. -func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjGetInfoByFDAttr{ - fd: value, - infoLen: uint32(size), - info: NewPointer(info), - } - _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("fd %v: %w", fd, err) - } - return nil -} - -type bpfGetFDByIDAttr struct { - id uint32 - next uint32 -} - -// BPFObjGetInfoByFD wraps BPF_*_GET_FD_BY_ID. -// -// Available from 4.13. -func BPFObjGetFDByID(cmd BPFCmd, id uint32) (*FD, error) { - attr := bpfGetFDByIDAttr{ - id: id, - } - ptr, err := BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return NewFD(uint32(ptr)), err -} - -// BPFObjName is a null-terminated string made up of -// 'A-Za-z0-9_' characters. -type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte - -// NewBPFObjName truncates the result if it is too long. 
-func NewBPFObjName(name string) BPFObjName { - var result BPFObjName - copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) - return result -} - -type BPFMapCreateAttr struct { - MapType uint32 - KeySize uint32 - ValueSize uint32 - MaxEntries uint32 - Flags uint32 - InnerMapFd uint32 // since 4.12 56f668dfe00d - NumaNode uint32 // since 4.14 96eabe7a40aa - MapName BPFObjName // since 4.15 ad5b177bd73f - MapIfIndex uint32 - BTFFd uint32 - BTFKeyTypeID uint32 - BTFValueTypeID uint32 -} - -func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) { - fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return NewFD(uint32(fd)), nil -} - -// wrappedErrno wraps syscall.Errno to prevent direct comparisons with -// syscall.E* or unix.E* constants. -// -// You should never export an error of this type. -type wrappedErrno struct { - syscall.Errno -} - -func (we wrappedErrno) Unwrap() error { - return we.Errno -} - -type syscallError struct { - error - errno syscall.Errno -} - -func SyscallError(err error, errno syscall.Errno) error { - return &syscallError{err, errno} -} - -func (se *syscallError) Is(target error) bool { - return target == se.error -} - -func (se *syscallError) Unwrap() error { - return se.errno -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/syscall_string.go deleted file mode 100644 index 85df04779731..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/syscall_string.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT. - -package internal - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[BPF_MAP_CREATE-0] - _ = x[BPF_MAP_LOOKUP_ELEM-1] - _ = x[BPF_MAP_UPDATE_ELEM-2] - _ = x[BPF_MAP_DELETE_ELEM-3] - _ = x[BPF_MAP_GET_NEXT_KEY-4] - _ = x[BPF_PROG_LOAD-5] - _ = x[BPF_OBJ_PIN-6] - _ = x[BPF_OBJ_GET-7] - _ = x[BPF_PROG_ATTACH-8] - _ = x[BPF_PROG_DETACH-9] - _ = x[BPF_PROG_TEST_RUN-10] - _ = x[BPF_PROG_GET_NEXT_ID-11] - _ = x[BPF_MAP_GET_NEXT_ID-12] - _ = x[BPF_PROG_GET_FD_BY_ID-13] - _ = x[BPF_MAP_GET_FD_BY_ID-14] - _ = x[BPF_OBJ_GET_INFO_BY_FD-15] - _ = x[BPF_PROG_QUERY-16] - _ = x[BPF_RAW_TRACEPOINT_OPEN-17] - _ = x[BPF_BTF_LOAD-18] - _ = x[BPF_BTF_GET_FD_BY_ID-19] - _ = x[BPF_TASK_FD_QUERY-20] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21] - _ = x[BPF_MAP_FREEZE-22] - _ = x[BPF_BTF_GET_NEXT_ID-23] - _ = x[BPF_MAP_LOOKUP_BATCH-24] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25] - _ = x[BPF_MAP_UPDATE_BATCH-26] - _ = x[BPF_MAP_DELETE_BATCH-27] - _ = x[BPF_LINK_CREATE-28] - _ = x[BPF_LINK_UPDATE-29] - _ = x[BPF_LINK_GET_FD_BY_ID-30] - _ = x[BPF_LINK_GET_NEXT_ID-31] - _ = x[BPF_ENABLE_STATS-32] - _ = x[BPF_ITER_CREATE-33] -} - -const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE" - -var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617} - -func (i BPFCmd) String() string { - if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) { - return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]] -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 9aa70fa78c2a..db4a1f5bf9e7 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -4,7 +4,6 @@ package unix import ( - "bytes" "syscall" linux "golang.org/x/sys/unix" @@ -23,6 +22,8 @@ const ( ENODEV = linux.ENODEV EBADF = linux.EBADF E2BIG = linux.E2BIG + EFAULT = linux.EFAULT + EACCES = linux.EACCES // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) @@ -66,11 +67,16 @@ const ( PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE AT_FDCWD = linux.AT_FDCWD RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET ) // Statfs_t is a wrapper type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t + // Rlimit is a wrapper type Rlimit = linux.Rlimit @@ -191,18 +197,14 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) } -func KernelRelease() (string, error) { - var uname Utsname - err := Uname(&uname) - if err != nil { - return "", err - } +func Prlimit(pid, 
resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} - end := bytes.IndexByte(uname.Release[:], 0) - release := string(uname.Release[:end]) - return release, nil +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) } -func Prlimit(pid, resource int, new, old *Rlimit) error { - return linux.Prlimit(pid, resource, new, old) +func Fstat(fd int, stat *Stat_t) error { + return linux.Fstat(fd, stat) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index 4f50d896ebb8..133c267dbc3d 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -23,6 +23,8 @@ const ( ENODEV = syscall.ENODEV EBADF = syscall.Errno(0) E2BIG = syscall.Errno(0) + EFAULT = syscall.EFAULT + EACCES = syscall.Errno(0) // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) @@ -67,6 +69,9 @@ const ( PERF_RECORD_SAMPLE = 9 AT_FDCWD = -0x2 RENAME_NOREPLACE = 0x1 + SO_ATTACH_BPF = 0x32 + SO_DETACH_BPF = 0x1b + SOL_SOCKET = 0x1 ) // Statfs_t is a wrapper @@ -85,6 +90,8 @@ type Statfs_t struct { Spare [4]int64 } +type Stat_t struct{} + // Rlimit is a wrapper type Rlimit struct { Cur uint64 @@ -258,10 +265,14 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags return errNonLinux } -func KernelRelease() (string, error) { - return "", errNonLinux +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux } -func Prlimit(pid, resource int, new, old *Rlimit) error { +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux +} + +func Fstat(fd int, stat *Stat_t) error { return errNonLinux } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/vdso.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/vdso.go new file mode 100644 index 000000000000..ae4821de20c4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/vdso.go @@ -0,0 +1,150 @@ +package internal + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + // Read data from the auxiliary vector, which is normally passed directly + // to the process. Go does not expose that data, so we must read it from procfs. + // https://man7.org/linux/man-pages/man3/getauxval.3.html + av, err := os.Open("/proc/self/auxv") + if err != nil { + return 0, fmt.Errorf("opening auxv: %w", err) + } + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. 
+ c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. +func vdsoMemoryAddress(r io.Reader) (uint64, error) { + const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image + ) + + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + aux := struct{ Tag, Val uint64 }{} + for { + if err := binary.Read(r, NativeEndian, &aux); err != nil { + return 0, fmt.Errorf("reading auxv entry: %w", err) + } + + switch aux.Tag { + case _AT_SYSINFO_EHDR: + if aux.Val != 0 { + return aux.Val, nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, Align(int(n.NameSize), 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. + name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. 
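+ // Both the note name and the descriptor are padded to 4-byte boundaries;
+ // for example, the name "Linux\x00" has NameSize 6 but occupies 8 bytes,
+ // so the Align()ed length, not DescSize itself, is what must be skipped here.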
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/version.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/version.go index 4915e583767a..370e01e4447c 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/version.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,8 +2,6 @@ package internal import ( "fmt" - "os" - "regexp" "sync" "github.com/cilium/ebpf/internal/unix" @@ -18,12 +16,6 @@ const ( ) var ( - // Match between one and three decimals separated by dots, with the last - // segment (patch level) being optional on some kernels. - // The x.y.z string must appear at the start of a string or right after - // whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'. - rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`) - kernelVersion = struct { once sync.Once version Version @@ -46,6 +38,15 @@ func NewVersion(ver string) (Version, error) { return Version{major, minor, patch}, nil } +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + func (v Version) String() string { if v[2] == 0 { return fmt.Sprintf("v%d.%d", v[0], v[1]) @@ -98,66 +99,24 @@ func KernelVersion() (Version, error) { return kernelVersion.version, nil } -// detectKernelVersion returns the version of the running kernel. It scans the -// following sources in order: /proc/version_signature, uname -v, uname -r. -// In each of those locations, the last-appearing x.y(.z) value is selected -// for parsing. The first location that yields a usable version number is -// returned. +// detectKernelVersion returns the version of the running kernel. func detectKernelVersion() (Version, error) { - - // Try reading /proc/version_signature for Ubuntu compatibility. - // Example format: Ubuntu 4.15.0-91.92-generic 4.15.18 - // This method exists in the kernel itself, see d18acd15c - // ("perf tools: Fix kernel version error in ubuntu"). - if pvs, err := os.ReadFile("/proc/version_signature"); err == nil { - // If /proc/version_signature exists, failing to parse it is an error. - // It only exists on Ubuntu, where the real patch level is not obtainable - // through any other method. - v, err := findKernelVersion(string(pvs)) - if err != nil { - return Version{}, err - } - return v, nil - } - - var uname unix.Utsname - if err := unix.Uname(&uname); err != nil { - return Version{}, fmt.Errorf("calling uname: %w", err) - } - - // Debian puts the version including the patch level in uname.Version. - // It is not an error if there's no version number in uname.Version, - // as most distributions don't use it. Parsing can continue on uname.Release. - // Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08) - if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil { - return v, nil - } - - // Most other distributions have the full kernel version including patch - // level in uname.Release. 
- // Example format: 4.19.0-5-amd64, 5.5.10-arch1-1 - v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:])) + vc, err := vdsoVersion() if err != nil { return Version{}, err } - - return v, nil + return NewVersionFromCode(vc), nil } -// findKernelVersion matches s against rgxKernelVersion and parses the result -// into a Version. If s contains multiple matches, the last entry is selected. -func findKernelVersion(s string) (Version, error) { - m := rgxKernelVersion.FindAllString(s, -1) - if m == nil { - return Version{}, fmt.Errorf("no kernel version in string: %s", s) - } - // Pick the last match of the string in case there are multiple. - s = m[len(m)-1] - - v, err := NewVersion(s) - if err != nil { - return Version{}, fmt.Errorf("parsing version string %s: %w", s, err) +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. +func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) } - return v, nil + return unix.ByteSliceToString(uname.Release[:]), nil } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/cgroup.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/cgroup.go index 5540bb068cd2..003b0638e89d 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/cgroup.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -56,16 +56,6 @@ func AttachCgroup(opts CgroupOptions) (Link, error) { return cg, nil } -// LoadPinnedCgroup loads a pinned cgroup from a bpffs. -func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { - link, err := LoadPinnedRawLink(fileName, CgroupType, opts) - if err != nil { - return nil, err - } - - return &linkCgroup{*link}, nil -} - type progAttachCgroup struct { cgroup *os.File current *ebpf.Program @@ -151,6 +141,10 @@ func (cg *progAttachCgroup) Unpin() error { return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) } +func (cg *progAttachCgroup) Info() (*Info, error) { + return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) +} + type linkCgroup struct { RawLink } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/freplace.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/freplace.go deleted file mode 100644 index a698e1a9d30c..000000000000 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/freplace.go +++ /dev/null @@ -1,88 +0,0 @@ -package link - -import ( - "fmt" - - "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal/btf" -) - -type FreplaceLink struct { - RawLink -} - -// AttachFreplace attaches the given eBPF program to the function it replaces. -// -// The program and name can either be provided at link time, or can be provided -// at program load time. If they were provided at load time, they should be nil -// and empty respectively here, as they will be ignored by the kernel. 
-// Examples: -// -// AttachFreplace(dispatcher, "function", replacement) -// AttachFreplace(nil, "", replacement) -func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (*FreplaceLink, error) { - if (name == "") != (targetProg == nil) { - return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) - } - if prog == nil { - return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) - } - if prog.Type() != ebpf.Extension { - return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) - } - - var ( - target int - typeID btf.TypeID - ) - if targetProg != nil { - info, err := targetProg.Info() - if err != nil { - return nil, err - } - btfID, ok := info.BTFID() - if !ok { - return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput) - } - btfHandle, err := btf.NewHandleFromID(btfID) - if err != nil { - return nil, err - } - defer btfHandle.Close() - - var function *btf.Func - if err := btfHandle.Spec().FindType(name, &function); err != nil { - return nil, err - } - - target = targetProg.FD() - typeID = function.ID() - } - - link, err := AttachRawLink(RawLinkOptions{ - Target: target, - Program: prog, - Attach: ebpf.AttachNone, - BTF: typeID, - }) - if err != nil { - return nil, err - } - - return &FreplaceLink{*link}, nil -} - -// Update implements the Link interface. -func (f *FreplaceLink) Update(new *ebpf.Program) error { - return fmt.Errorf("freplace update: %w", ErrNotSupported) -} - -// LoadPinnedFreplace loads a pinned iterator from a bpffs. -func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (*FreplaceLink, error) { - link, err := LoadPinnedRawLink(fileName, TracingType, opts) - if err != nil { - return nil, err - } - - return &FreplaceLink{*link}, err -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/iter.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/iter.go index 654d34ef8482..d2b32ef331cd 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/iter.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/iter.go @@ -6,7 +6,7 @@ import ( "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type IterOptions struct { @@ -31,26 +31,26 @@ func AttachIter(opts IterOptions) (*Iter, error) { progFd := opts.Program.FD() if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } var info bpfIterLinkInfoMap if opts.Map != nil { mapFd := opts.Map.FD() if mapFd < 0 { - return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd) } info.map_fd = uint32(mapFd) } - attr := bpfLinkCreateIterAttr{ - prog_fd: uint32(progFd), - attach_type: ebpf.AttachTraceIter, - iter_info: internal.NewPointer(unsafe.Pointer(&info)), - iter_info_len: uint32(unsafe.Sizeof(info)), + attr := sys.LinkCreateIterAttr{ + ProgFd: uint32(progFd), + AttachType: sys.AttachType(ebpf.AttachTraceIter), + IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfoLen: uint32(unsafe.Sizeof(info)), } - fd, err := bpfLinkCreateIter(&attr) + fd, err := sys.LinkCreateIter(&attr) if err != nil { return nil, fmt.Errorf("can't link iterator: %w", err) } @@ -58,16 +58,6 @@ func AttachIter(opts IterOptions) (*Iter, error) { return &Iter{RawLink{fd, ""}}, err } -// 
LoadPinnedIter loads a pinned iterator from a bpffs. -func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) { - link, err := LoadPinnedRawLink(fileName, IterType, opts) - if err != nil { - return nil, err - } - - return &Iter{*link}, err -} - // Iter represents an attached bpf_iter. type Iter struct { RawLink @@ -77,16 +67,11 @@ type Iter struct { // // Reading from the returned reader triggers the BPF program. func (it *Iter) Open() (io.ReadCloser, error) { - linkFd, err := it.fd.Value() - if err != nil { - return nil, err - } - - attr := &bpfIterCreateAttr{ - linkFd: linkFd, + attr := &sys.IterCreateAttr{ + LinkFd: it.fd.Uint(), } - fd, err := bpfIterCreate(attr) + fd, err := sys.IterCreate(attr) if err != nil { return nil, fmt.Errorf("can't create iterator: %w", err) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/kprobe.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/kprobe.go index b6577b5a9925..fdf622a0c07f 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -8,11 +8,13 @@ import ( "os" "path/filepath" "runtime" + "strings" "sync" + "syscall" "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -28,6 +30,27 @@ var ( type probeType uint8 +type probeArgs struct { + symbol, group, path string + offset, refCtrOffset, cookie uint64 + pid int + ret bool +} + +// KprobeOptions defines additional parameters that will be used +// when loading Kprobes. +type KprobeOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Offset of the kprobe relative to the traced symbol. + // Can be used to insert kprobes at arbitrary offsets in kernel functions, + // e.g. in places where functions have been inlined. + Offset uint64 +} + const ( kprobeType probeType = iota uprobeType @@ -71,70 +94,109 @@ func (pt probeType) RetprobeBit() (uint64, error) { // given kernel symbol starts executing. See /proc/kallsyms for available // symbols. For example, printk(): // -// kp, err := Kprobe("printk", prog) +// kp, err := Kprobe("printk", prog, nil) // // Losing the reference to the resulting Link (kp) will close the Kprobe // and prevent further execution of prog. The Link must be Closed during // program shutdown to avoid leaking system resources. -func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, false) +func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, false) if err != nil { return nil, err } - err = k.attach(prog) + lnk, err := attachPerfEvent(k, prog) if err != nil { k.Close() return nil, err } - return k, nil + return lnk, nil } // Kretprobe attaches the given eBPF program to a perf event that fires right // before the given kernel symbol exits, with the function stack left intact. // See /proc/kallsyms for available symbols. For example, printk(): // -// kp, err := Kretprobe("printk", prog) +// kp, err := Kretprobe("printk", prog, nil) // // Losing the reference to the resulting Link (kp) will close the Kretprobe // and prevent further execution of prog. The Link must be Closed during // program shutdown to avoid leaking system resources. 
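+//
+// A sketch of the new opts parameter shared by Kprobe and Kretprobe
+// (assuming a loaded *ebpf.Program of type Kprobe named prog; the symbol
+// and values below are examples only):
+//
+//	kp, err := Kprobe("tcp_sendmsg", prog, &KprobeOptions{
+//		Offset: 0x8,  // probe at symbol+0x8, e.g. where code was inlined
+//		Cookie: 1234, // read back via bpf_get_attach_cookie(), kernel 5.15+
+//	})
+//	if err != nil { ... }
+//	defer kp.Close()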
-func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, true) +func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, true) if err != nil { return nil, err } - err = k.attach(prog) + lnk, err := attachPerfEvent(k, prog) if err != nil { k.Close() return nil, err } - return k, nil + return lnk, nil +} + +// isValidKprobeSymbol implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_.]*$". +func isValidKprobeSymbol(s string) bool { + if len(s) < 1 { + return false + } + + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + // Allow `.` in symbol name. GCC-compiled kernel may change symbol name + // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`. + // See: https://gcc.gnu.org/gcc-10/changes.html + case i > 0 && c == '.': + + default: + return false + } + } + + return true } // kprobe opens a perf event on the given symbol and attaches prog to it. // If ret is true, create a kretprobe. -func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { +func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { if symbol == "" { return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) } if prog == nil { return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) } - if !rgxTraceEvent.MatchString(symbol) { - return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput) + if !isValidKprobeSymbol(symbol) { + return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) } if prog.Type() != ebpf.Kprobe { return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) } + args := probeArgs{ + pid: perfAllThreads, + symbol: symbol, + ret: ret, + } + + if opts != nil { + args.cookie = opts.Cookie + args.offset = opts.Offset + } + // Use kprobe PMU if the kernel has it available. - tp, err := pmuKprobe(platformPrefix(symbol), ret) + tp, err := pmuKprobe(args) if errors.Is(err, os.ErrNotExist) { - tp, err = pmuKprobe(symbol, ret) + args.symbol = platformPrefix(symbol) + tp, err = pmuKprobe(args) } if err == nil { return tp, nil @@ -144,9 +206,11 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { } // Use tracefs if kprobe PMU is missing. - tp, err = tracefsKprobe(platformPrefix(symbol), ret) + args.symbol = symbol + tp, err = tracefsKprobe(args) if errors.Is(err, os.ErrNotExist) { - tp, err = tracefsKprobe(symbol, ret) + args.symbol = platformPrefix(symbol) + tp, err = tracefsKprobe(args) } if err != nil { return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) @@ -157,8 +221,8 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { // pmuKprobe opens a perf event based on the kprobe PMU. // Returns os.ErrNotExist if the given symbol does not exist in the kernel. -func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { - return pmuProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) +func pmuKprobe(args probeArgs) (*perfEvent, error) { + return pmuProbe(kprobeType, args) } // pmuProbe opens a perf event based on a Performance Monitoring Unit. 
@@ -168,7 +232,7 @@ func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" // // Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU -func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { +func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { // Getting the PMU type will fail if the kernel doesn't support // the perf_[k,u]probe PMU. et, err := getPMUEventType(typ) @@ -177,7 +241,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo } var config uint64 - if ret { + if args.ret { bit, err := typ.RetprobeBit() if err != nil { return nil, err @@ -192,22 +256,30 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo switch typ { case kprobeType: // Create a pointer to a NUL-terminated string for the kernel. - sp, err = unsafeStringPtr(symbol) + sp, err = unsafeStringPtr(args.symbol) if err != nil { return nil, err } attr = unix.PerfEventAttr{ + // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. Use Ext2 as probe_offset. + Size: unix.PERF_ATTR_SIZE_VER1, Type: uint32(et), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Ext2: args.offset, // Kernel symbol offset Config: config, // Retprobe flag } case uprobeType: - sp, err = unsafeStringPtr(path) + sp, err = unsafeStringPtr(args.path) if err != nil { return nil, err } + if args.refCtrOffset != 0 { + config |= args.refCtrOffset << uprobeRefCtrOffsetShift + } + attr = unix.PerfEventAttr{ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, // since it added the config2 (Ext2) field. The Size field controls the @@ -216,23 +288,34 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo Size: unix.PERF_ATTR_SIZE_VER1, Type: uint32(et), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Uprobe path - Ext2: offset, // Uprobe offset - Config: config, // Retprobe flag + Ext2: args.offset, // Uprobe offset + Config: config, // RefCtrOffset, Retprobe flag } } - fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and + // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs. + // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 + if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") { + return nil, fmt.Errorf("symbol '%s+%#x': older kernels don't accept dots: %w", args.symbol, args.offset, ErrNotSupported) + } // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. Make sure ENOENT // is returned to the caller. if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist) + return nil, fmt.Errorf("symbol '%s+%#x' not found: %w", args.symbol, args.offset, os.ErrNotExist) + } + // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. 
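For reference, this is how the PMU path above packs `perf_event_attr.Config`: the retprobe flag bit is read from sysfs at runtime (commonly bit 0, but never assumed), and `ref_ctr_offset` is shifted into the upper 32 bits per the `uprobeRefCtrOffsetShift` constant introduced later in this diff. A sketch with hypothetical values:

```go
package main

import "fmt"

func main() {
	var config uint64

	// In the real code this bit index is read from
	// /sys/bus/event_source/devices/uprobe/format/retprobe; 0 is a placeholder.
	retprobeBit := uint64(0)
	config |= 1 << retprobeBit

	// Hypothetical SDT semaphore offset; packed into config[63:32]
	// per uprobeRefCtrOffsetShift (32) defined later in this diff.
	refCtrOffset := uint64(0x1c0)
	config |= refCtrOffset << 32

	fmt.Printf("perf_event_attr.config = %#x\n", config)
}
```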
+ if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("symbol '%s+%#x' not found (bad insn boundary): %w", args.symbol, args.offset, os.ErrNotExist) } // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned // when attempting to set a uprobe on a trap instruction. if errors.Is(err, unix.ENOTSUPP) { - return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", offset, err) + return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err) } if err != nil { return nil, fmt.Errorf("opening perf event: %w", err) @@ -241,18 +324,24 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bo // Ensure the string pointer is not collected before PerfEventOpen returns. runtime.KeepAlive(sp) + fd, err := sys.NewFD(rawFd) + if err != nil { + return nil, err + } + // Kernel has perf_[k,u]probe PMU available, initialize perf event. return &perfEvent{ - fd: internal.NewFD(uint32(fd)), - pmuID: et, - name: symbol, - typ: typ.PerfEventType(ret), + typ: typ.PerfEventType(args.ret), + name: args.symbol, + pmuID: et, + cookie: args.cookie, + fd: fd, }, nil } // tracefsKprobe creates a Kprobe tracefs entry. -func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { - return tracefsProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) +func tracefsKprobe(args probeArgs) (*perfEvent, error) { + return tracefsProbe(kprobeType, args) } // tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. @@ -261,7 +350,7 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { // Path and offset are only set in the case of uprobe(s) and are used to set // the executable/library path on the filesystem and the offset where the probe is inserted. // A perf event is then opened on the newly-created trace event and returned to the caller. -func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { +func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) { // Generate a random string for each trace event we attempt to create. // This value is used as the 'group' token in tracefs to allow creating // multiple kprobe trace events with the same name. @@ -269,42 +358,53 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, re if err != nil { return nil, fmt.Errorf("randomizing group name: %w", err) } + args.group = group // Before attempting to create a trace event through tracefs, // check if an event with the same group and name already exists. // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate // entry, so we need to rely on reads for detecting uniqueness. - _, err = getTraceEventID(group, symbol) + _, err = getTraceEventID(group, args.symbol) if err == nil { - return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol) + return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol) } if err != nil && !errors.Is(err, os.ErrNotExist) { - return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err) + return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err) } // Create the [k,u]probe trace event using tracefs. 
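Because missing symbols, dot-suffixed symbols on old kernels, and bad instruction boundaries are all normalized to `os.ErrNotExist` by the mappings above, callers can probe several candidate symbols in order. A sketch of that pattern; the helper name and the symbol list are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// kprobeFirst attaches prog to the first symbol that resolves. Missing
// symbols and bad instruction boundaries both surface as os.ErrNotExist
// after the error mapping in this diff, so they can be skipped uniformly.
func kprobeFirst(prog *ebpf.Program, symbols ...string) (link.Link, error) {
	for _, sym := range symbols {
		kp, err := link.Kprobe(sym, prog, nil)
		if err == nil {
			return kp, nil
		}
		if errors.Is(err, os.ErrNotExist) {
			continue // try the next candidate
		}
		return nil, err
	}
	return nil, fmt.Errorf("no candidate symbol found: %w", os.ErrNotExist)
}

func main() {
	// Usage, with prog built as in the earlier Kprobe example:
	// kp, err := kprobeFirst(prog, "udp_send_skb.isra.52", "udp_send_skb")
	_ = kprobeFirst
}
```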
- if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil { + if err := createTraceFSProbeEvent(typ, args); err != nil { return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) } + defer func() { + if err != nil { + // Make sure we clean up the created tracefs event when we return error. + // If a livepatch handler is already active on the symbol, the write to + // tracefs will succeed, a trace event will show up, but creating the + // perf event will fail with EBUSY. + _ = closeTraceFSProbeEvent(typ, args.group, args.symbol) + } + }() // Get the newly-created trace event's id. - tid, err := getTraceEventID(group, symbol) + tid, err := getTraceEventID(group, args.symbol) if err != nil { return nil, fmt.Errorf("getting trace event id: %w", err) } // Kprobes are ephemeral tracepoints and share the same perf event type. - fd, err := openTracepointPerfEvent(tid, pid) + fd, err := openTracepointPerfEvent(tid, args.pid) if err != nil { return nil, err } return &perfEvent{ - fd: fd, + typ: typ.PerfEventType(args.ret), group: group, - name: symbol, + name: args.symbol, tracefsID: tid, - typ: typ.PerfEventType(ret), + cookie: args.cookie, + fd: fd, }, nil } @@ -312,7 +412,7 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, re // /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid // kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist // if a probe with the same group and symbol already exists. -func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error { +func createTraceFSProbeEvent(typ probeType, args probeArgs) error { // Open the kprobe_events file in tracefs. f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) if err != nil { @@ -320,7 +420,7 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u } defer f.Close() - var pe string + var pe, token string switch typ { case kprobeType: // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): @@ -337,7 +437,8 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u // subsampling or rate limiting logic can be more accurately implemented in // the eBPF program itself. // See Documentation/kprobes.txt for more details. - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol) + token = kprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token) case uprobeType: // The uprobe_events syntax is as follows: // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe @@ -346,18 +447,30 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u // // Some examples: // r:ebpf_1234/readline /bin/bash:0x12345 - // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) // // See Documentation/trace/uprobetracer.txt for more details. - pathOffset := uprobePathOffset(path, offset) - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset) + token = uprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token) } _, err = f.WriteString(pe) // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. Make sure ENOENT // is returned to the caller. 
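The strings actually written to `kprobe_events`/`uprobe_events` follow the `p[:[GRP/]EVENT] TOKEN` syntax quoted above. A standalone re-creation of the unexported `kprobeToken`/`uprobeToken` helpers, printing example entries (group names, paths and offsets are made up):

```go
package main

import "fmt"

// kprobeToken builds the SYM[+offs] token, as in this diff.
func kprobeToken(symbol string, offset uint64) string {
	if offset != 0 {
		return fmt.Sprintf("%s+%#x", symbol, offset)
	}
	return symbol
}

// uprobeToken builds the PATH:OFFSET(REF_CTR_OFFSET) token, as in this diff.
func uprobeToken(path string, offset, refCtrOffset uint64) string {
	po := fmt.Sprintf("%s:%#x", path, offset)
	if refCtrOffset != 0 {
		po += fmt.Sprintf("(%#x)", refCtrOffset)
	}
	return po
}

func main() {
	// kprobe_events: p[:[GRP/]EVENT] SYM[+offs]
	fmt.Printf("p:ebpf_123/p_do_sys_open %s\n", kprobeToken("do_sys_open", 0x10))
	// uprobe_events: r[:[GRP/]EVENT] PATH:OFFSET(REF_CTR_OFFSET)
	fmt.Printf("r:ebpf_456/readline %s\n", uprobeToken("/bin/bash", 0x12345, 0x1c0))
}
```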
+ // EINVAL is also returned on pre-5.2 kernels when the `SYM[+offs]` token + // is resolved to an invalid insn boundary. if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist) + return fmt.Errorf("token %s: %w", token, os.ErrNotExist) + } + // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. + if errors.Is(err, syscall.EILSEQ) { + return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) } if err != nil { return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) @@ -377,7 +490,7 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { // See [k,u]probe_events syntax above. The probe type does not need to be specified // for removals. - pe := fmt.Sprintf("-:%s/%s", group, symbol) + pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol)) if _, err = f.WriteString(pe); err != nil { return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) } @@ -388,9 +501,9 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { // randomGroup generates a pseudorandom string for use as a tracefs group name. // Returns an error when the output string would exceed 63 characters (kernel // limitation), when rand.Read() fails or when prefix contains characters not -// allowed by rgxTraceEvent. +// allowed by isValidTraceID. func randomGroup(prefix string) (string, error) { - if !rgxTraceEvent.MatchString(prefix) { + if !isValidTraceID(prefix) { return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput) } @@ -442,3 +555,14 @@ func kretprobeBit() (uint64, error) { }) return kprobeRetprobeBit.value, kprobeRetprobeBit.err } + +// kprobeToken creates the SYM[+offs] token for the tracefs api. +func kprobeToken(args probeArgs) string { + po := args.symbol + + if args.offset != 0 { + po += fmt.Sprintf("+%#x", args.offset) + } + + return po +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/link.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/link.go index 4926584696bb..067d0101aa9f 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/link.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/link.go @@ -1,12 +1,14 @@ package link import ( + "bytes" + "encoding/binary" "fmt" - "unsafe" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" ) var ErrNotSupported = internal.ErrNotSupported @@ -35,12 +37,53 @@ type Link interface { // not called. Close() error + // Info returns metadata on a link. + // + // May return an error wrapping ErrNotSupported. + Info() (*Info, error) + // Prevent external users from implementing this interface. isLink() } +// LoadPinnedLink loads a link that was persisted into a bpffs. +func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err + } + + return wrapRawLink(raw) +} + +// wrap a RawLink in a more specific type if possible. 
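`LoadPinnedLink` replaces the per-type `LoadPinned*` constructors removed elsewhere in this diff. A minimal sketch, assuming a hypothetical pin path under a mounted bpffs; passing `nil` options relies on `ebpf.LoadPinOptions.Marshal` being nil-safe:

```go
package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/link"
)

func main() {
	// Hypothetical pin path under a mounted bpffs.
	l, err := link.LoadPinnedLink("/sys/fs/bpf/my_link", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// The returned Link is now wrapped in a type-specific struct where
	// possible (Iter, NetNsLink, tracing, ...), falling back to *RawLink.
	info, err := l.Info()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%d id=%d prog=%d\n", info.Type, info.ID, info.Program)
}
```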
+// +// The function takes ownership of raw and closes it on error. +func wrapRawLink(raw *RawLink) (Link, error) { + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + default: + return raw, nil + } +} + // ID uniquely identifies a BPF link. -type ID uint32 +type ID = sys.LinkID // RawLinkOptions control the creation of a raw link. type RawLinkOptions struct { @@ -52,13 +95,53 @@ type RawLinkOptions struct { Attach ebpf.AttachType // BTF is the BTF of the attachment target. BTF btf.TypeID + // Flags control the attach behaviour. + Flags uint32 } -// RawLinkInfo contains metadata on a link. -type RawLinkInfo struct { +// Info contains metadata on a link. +type Info struct { Type Type ID ID Program ebpf.ProgramID + extra interface{} +} + +type TracingInfo sys.TracingLinkInfo +type CgroupInfo sys.CgroupLinkInfo +type NetNsInfo sys.NetNsLinkInfo +type XDPInfo sys.XDPLinkInfo + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// ExtraNetNs returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e } // RawLink is the low-level API to bpf_link. @@ -66,7 +149,7 @@ type RawLinkInfo struct { // You should consider using the higher level interfaces in this // package instead. type RawLink struct { - fd *internal.FD + fd *sys.FD pinnedPath string } @@ -77,21 +160,22 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { } if opts.Target < 0 { - return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) } progFd := opts.Program.FD() if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } - attr := bpfLinkCreateAttr{ - targetFd: uint32(opts.Target), - progFd: uint32(progFd), - attachType: opts.Attach, - targetBTFID: uint32(opts.BTF), + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(opts.Attach), + TargetBtfId: uint32(opts.BTF), + Flags: opts.Flags, } - fd, err := bpfLinkCreate(&attr) + fd, err := sys.LinkCreate(&attr) if err != nil { return nil, fmt.Errorf("can't create link: %s", err) } @@ -99,44 +183,23 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { return &RawLink{fd, ""}, nil } -// LoadPinnedRawLink loads a persisted link from a bpffs. -// -// Returns an error if the pinned link type doesn't match linkType. Pass -// UnspecifiedType to disable this behaviour. 
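The new `Info` accessors return `nil` rather than an error when the kernel did not report matching type-specific data, so callers can probe them directly. A sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/link"
)

func describe(l link.Link) {
	info, err := l.Info()
	if err != nil {
		log.Fatal(err) // e.g. ioctl-based perf event links wrap ErrNotSupported
	}

	// Each accessor returns nil unless the kernel reported matching
	// type-specific data in bpf_link_info.
	switch {
	case info.Tracing() != nil:
		fmt.Println("tracing link")
	case info.Cgroup() != nil:
		fmt.Println("cgroup link")
	case info.NetNs() != nil:
		fmt.Println("netns link")
	case info.XDP() != nil:
		fmt.Println("XDP link")
	default:
		fmt.Println("no type-specific info")
	}
}

func main() { _ = describe }
```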
-func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) +func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, fmt.Errorf("load pinned link: %w", err) } - link := &RawLink{fd, fileName} - if linkType == UnspecifiedType { - return link, nil - } - - info, err := link.Info() - if err != nil { - link.Close() - return nil, fmt.Errorf("get pinned link info: %s", err) - } - - if info.Type != linkType { - link.Close() - return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, linkType) - } - - return link, nil + return &RawLink{fd, fileName}, nil } func (l *RawLink) isLink() {} // FD returns the raw file descriptor. func (l *RawLink) FD() int { - fd, err := l.fd.Value() - if err != nil { - return -1 - } - return int(fd) + return l.fd.Int() } // Close breaks the link. @@ -185,49 +248,66 @@ type RawLinkUpdateOptions struct { func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { newFd := opts.New.FD() if newFd < 0 { - return fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } var oldFd int if opts.Old != nil { oldFd = opts.Old.FD() if oldFd < 0 { - return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd) + return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd) } } - linkFd, err := l.fd.Value() - if err != nil { - return fmt.Errorf("can't update link: %s", err) - } - - attr := bpfLinkUpdateAttr{ - linkFd: linkFd, - newProgFd: uint32(newFd), - oldProgFd: uint32(oldFd), - flags: opts.Flags, + attr := sys.LinkUpdateAttr{ + LinkFd: l.fd.Uint(), + NewProgFd: uint32(newFd), + OldProgFd: uint32(oldFd), + Flags: opts.Flags, } - return bpfLinkUpdate(&attr) -} - -// struct bpf_link_info -type bpfLinkInfo struct { - typ uint32 - id uint32 - prog_id uint32 + return sys.LinkUpdate(&attr) } // Info returns metadata about the link. 
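`UpdateArgs` swaps the program behind a live link without detaching it. A minimal sketch; additionally passing `Old` (together with the kernel's `BPF_F_REPLACE` flag in `Flags`) would turn the swap into a compare-and-swap, per the `bpf_link_update` semantics:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// swapProgram atomically replaces the program behind a raw link while the
// attachment itself stays in place.
func swapProgram(l *link.RawLink, next *ebpf.Program) error {
	return l.UpdateArgs(link.RawLinkUpdateOptions{New: next})
}

func main() { log.Println("sketch only; needs an attached RawLink") }
```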
-func (l *RawLink) Info() (*RawLinkInfo, error) { - var info bpfLinkInfo - err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { +func (l *RawLink) Info() (*Info, error) { + var info sys.LinkInfo + + if err := sys.ObjInfo(l.fd, &info); err != nil { return nil, fmt.Errorf("link info: %s", err) } - return &RawLinkInfo{ - Type(info.typ), - ID(info.id), - ebpf.ProgramID(info.prog_id), + var extra interface{} + switch info.Type { + case CgroupType: + extra = &CgroupInfo{} + case IterType: + // not supported + case NetNsType: + extra = &NetNsInfo{} + case RawTracepointType: + // not supported + case TracingType: + extra = &TracingInfo{} + case XDPType: + extra = &XDPInfo{} + case PerfEventType: + // no extra + default: + return nil, fmt.Errorf("unknown link info type: %d", info.Type) + } + + if info.Type != RawTracepointType && info.Type != IterType && info.Type != PerfEventType { + buf := bytes.NewReader(info.Extra[:]) + err := binary.Read(buf, internal.NativeEndian, extra) + if err != nil { + return nil, fmt.Errorf("can not read extra link info: %w", err) + } + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, }, nil } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/netns.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/netns.go index 37e5b84c4ddf..344ecced6bea 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/netns.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/netns.go @@ -6,14 +6,9 @@ import ( "github.com/cilium/ebpf" ) -// NetNsInfo contains metadata about a network namespace link. -type NetNsInfo struct { - RawLinkInfo -} - // NetNsLink is a program attached to a network namespace. type NetNsLink struct { - *RawLink + RawLink } // AttachNetNs attaches a program to a network namespace. @@ -37,24 +32,5 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { return nil, err } - return &NetNsLink{link}, nil -} - -// LoadPinnedNetNs loads a network namespace link from bpffs. -func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) { - link, err := LoadPinnedRawLink(fileName, NetNsType, opts) - if err != nil { - return nil, err - } - - return &NetNsLink{link}, nil -} - -// Info returns information about the link. -func (nns *NetNsLink) Info() (*NetNsInfo, error) { - info, err := nns.RawLink.Info() - if err != nil { - return nil, err - } - return &NetNsInfo{*info}, nil + return &NetNsLink{*link}, nil } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/perf_event.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/perf_event.go index 7e0443a75cb0..0e5bd47911bb 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -6,14 +6,15 @@ import ( "fmt" "os" "path/filepath" - "regexp" "runtime" "strconv" "strings" "unsafe" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -43,11 +44,6 @@ import ( var ( tracefsPath = "/sys/kernel/debug/tracing" - // Trace event groups, names and kernel symbols must adhere to this set - // of characters. Non-empty, first character must not be a number, all - // characters must be alphanumeric or underscore. 
- rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$") - errInvalidInput = errors.New("invalid input") ) @@ -69,6 +65,8 @@ const ( // can be attached to it. It is created based on a tracefs trace event or a // Performance Monitoring Unit (PMU). type perfEvent struct { + // The event type determines the types of programs that can be attached. + typ perfEventType // Group and name of the tracepoint/kprobe/uprobe. group string @@ -79,53 +77,15 @@ type perfEvent struct { // ID of the trace event read from tracefs. Valid IDs are non-zero. tracefsID uint64 - // The event type determines the types of programs that can be attached. - typ perfEventType + // User provided arbitrary value. + cookie uint64 - fd *internal.FD -} - -func (pe *perfEvent) isLink() {} - -func (pe *perfEvent) Pin(string) error { - return fmt.Errorf("pin perf event: %w", ErrNotSupported) -} - -func (pe *perfEvent) Unpin() error { - return fmt.Errorf("unpin perf event: %w", ErrNotSupported) -} - -// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), -// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array -// owned by the perf event, which means multiple programs can be attached -// simultaneously. -// -// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event -// returns EEXIST. -// -// Detaching a program from a perf event is currently not possible, so a -// program replacement mechanism cannot be implemented for perf events. -func (pe *perfEvent) Update(prog *ebpf.Program) error { - return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported) + // This is the perf event FD. + fd *sys.FD } func (pe *perfEvent) Close() error { - if pe.fd == nil { - return nil - } - - pfd, err := pe.fd.Value() - if err != nil { - return fmt.Errorf("getting perf event fd: %w", err) - } - - err = unix.IoctlSetInt(int(pfd), unix.PERF_EVENT_IOC_DISABLE, 0) - if err != nil { - return fmt.Errorf("disabling perf event: %w", err) - } - - err = pe.fd.Close() - if err != nil { + if err := pe.fd.Close(); err != nil { return fmt.Errorf("closing perf event fd: %w", err) } @@ -148,49 +108,150 @@ func (pe *perfEvent) Close() error { return nil } +// perfEventLink represents a bpf perf link. +type perfEventLink struct { + RawLink + pe *perfEvent +} + +func (pl *perfEventLink) isLink() {} + +// Pinning requires the underlying perf event FD to stay open. +// +// | PerfEvent FD | BpfLink FD | Works | +// |--------------|------------|-------| +// | Open | Open | Yes | +// | Closed | Open | No | +// | Open | Closed | No (Pin() -> EINVAL) | +// | Closed | Closed | No (Pin() -> EINVAL) | +// +// There is currently no pretty way to recover the perf event FD +// when loading a pinned link, so leave as not supported for now. +func (pl *perfEventLink) Pin(string) error { + return fmt.Errorf("perf event link pin: %w", ErrNotSupported) +} + +func (pl *perfEventLink) Unpin() error { + return fmt.Errorf("perf event link unpin: %w", ErrNotSupported) +} + +func (pl *perfEventLink) Close() error { + if err := pl.pe.Close(); err != nil { + return fmt.Errorf("perf event link close: %w", err) + } + return pl.fd.Close() +} + +func (pl *perfEventLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event link update: %w", ErrNotSupported) +} + +// perfEventIoctl implements Link and handles the perf event lifecycle +// via ioctl(). 
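As the table above notes, neither perf-event flavor supports pinning yet; both wrap `ErrNotSupported`, which callers can detect with `errors.Is`. A sketch (the pin path and symbol are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/link"
)

func main() {
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type:    ebpf.Kprobe,
		License: "MIT",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()

	kp, err := link.Kprobe("printk", prog, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer kp.Close()

	// Both perf-event flavors refuse pinning for now, wrapping ErrNotSupported.
	if err := kp.Pin("/sys/fs/bpf/kp"); errors.Is(err, link.ErrNotSupported) {
		fmt.Println("perf event links cannot be pinned on this code path")
	}
}
```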
+type perfEventIoctl struct { + *perfEvent +} + +func (pi *perfEventIoctl) isLink() {} + +// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), +// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array +// owned by the perf event, which means multiple programs can be attached +// simultaneously. +// +// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event +// returns EEXIST. +// +// Detaching a program from a perf event is currently not possible, so a +// program replacement mechanism cannot be implemented for perf events. +func (pi *perfEventIoctl) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Pin(string) error { + return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Unpin() error { + return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Info() (*Info, error) { + return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) +} + // attach the given eBPF prog to the perf event stored in pe. // pe must contain a valid perf event fd. // prog's type must match the program type stored in pe. -func (pe *perfEvent) attach(prog *ebpf.Program) error { +func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) { if prog == nil { - return errors.New("cannot attach a nil program") - } - if pe.fd == nil { - return errors.New("cannot attach to nil perf event") + return nil, errors.New("cannot attach a nil program") } if prog.FD() < 0 { - return fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } + switch pe.typ { case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: if t := prog.Type(); t != ebpf.Kprobe { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) + return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) } case tracepointEvent: if t := prog.Type(); t != ebpf.TracePoint { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) + return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) } default: - return fmt.Errorf("unknown perf event type: %d", pe.typ) + return nil, fmt.Errorf("unknown perf event type: %d", pe.typ) + } + + if err := haveBPFLinkPerfEvent(); err == nil { + return attachPerfEventLink(pe, prog) } + return attachPerfEventIoctl(pe, prog) +} - // The ioctl below will fail when the fd is invalid. - kfd, _ := pe.fd.Value() +func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { + if pe.cookie != 0 { + return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) + } // Assign the eBPF program to the perf event. - err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) if err != nil { - return fmt.Errorf("setting perf event bpf program: %w", err) + return nil, fmt.Errorf("setting perf event bpf program: %w", err) } // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. 
- if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { - return fmt.Errorf("enable perf event: %s", err) + if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return nil, fmt.Errorf("enable perf event: %s", err) } + pi := &perfEventIoctl{pe} + // Close the perf event when its reference is lost to avoid leaking system resources. - runtime.SetFinalizer(pe, (*perfEvent).Close) - return nil + runtime.SetFinalizer(pi, (*perfEventIoctl).Close) + return pi, nil +} + +// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). +// +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) { + fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + TargetFd: pe.fd.Uint(), + AttachType: sys.BPF_PERF_EVENT, + BpfCookie: pe.cookie, + }) + if err != nil { + return nil, fmt.Errorf("cannot create bpf perf link: %v", err) + } + + pl := &perfEventLink{RawLink{fd: fd}, pe} + + // Close the perf event when its reference is lost to avoid leaking system resources. + runtime.SetFinalizer(pl, (*perfEventLink).Close) + return pl, nil } // unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. @@ -203,8 +264,12 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) { } // getTraceEventID reads a trace event's ID from tracefs given its group and name. -// group and name must be alphanumeric or underscore, as required by the kernel. +// The kernel requires group and name to be alphanumeric or underscore. +// +// name automatically has its invalid symbols converted to underscores so the caller +// can pass a raw symbol name, e.g. a kernel symbol containing dots. func getTraceEventID(group, name string) (uint64, error) { + name = sanitizeSymbol(name) tid, err := uint64FromFile(tracefsPath, "events", group, name, "id") if errors.Is(err, os.ErrNotExist) { return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist) @@ -235,7 +300,7 @@ func getPMUEventType(typ probeType) (uint64, error) { // openTracepointPerfEvent opens a tracepoint-type perf event. System-wide // [k,u]probes created by writing to /[k,u]probe_events are tracepoints // behind the scenes, and can be attached to using these perf events. -func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) { +func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { attr := unix.PerfEventAttr{ Type: unix.PERF_TYPE_TRACEPOINT, Config: tid, @@ -249,7 +314,7 @@ func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) { return nil, fmt.Errorf("opening tracepoint perf event: %w", err) } - return internal.NewFD(uint32(fd)), nil + return sys.NewFD(fd) } // uint64FromFile reads a uint64 from a file. All elements of path are sanitized @@ -270,3 +335,60 @@ func uint64FromFile(base string, path ...string) (uint64, error) { et := bytes.TrimSpace(data) return strconv.ParseUint(string(et), 10, 64) } + +// Probe BPF perf link. 
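`getTraceEventID` boils down to reading one small tracefs file (after `sanitizeSymbol` has normalized the event name). A standalone re-creation, useful when debugging probe setup by hand; the tracepoint chosen is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// traceEventID mirrors getTraceEventID from this diff: it reads the numeric
// ID that perf_event_open needs from the tracefs events tree.
func traceEventID(group, name string) (uint64, error) {
	path := filepath.Join("/sys/kernel/debug/tracing", "events", group, name, "id")
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	id, err := traceEventID("syscalls", "sys_enter_openat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("trace event id:", id)
}
```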
+// +// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_bpf_perf_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + return err + } + defer prog.Close() + + _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_PERF_EVENT, + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +// isValidTraceID implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set +// of characters. Non-empty, first character must not be a number, all +// characters must be alphanumeric or underscore. +func isValidTraceID(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + default: + return false + } + } + + return true +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/program.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/program.go index b90c4574676e..ea31817377fc 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/program.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/program.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type RawAttachProgramOptions struct { @@ -34,7 +34,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { replaceFd = uint32(opts.Replace.FD()) } - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ TargetFd: uint32(opts.Target), AttachBpfFd: uint32(opts.Program.FD()), ReplaceBpfFd: replaceFd, @@ -42,7 +42,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { AttachFlags: uint32(opts.Flags), } - if err := internal.BPFProgAttach(&attr); err != nil { + if err := sys.ProgAttach(&attr); err != nil { return fmt.Errorf("can't attach program: %w", err) } return nil @@ -63,12 +63,12 @@ func RawDetachProgram(opts RawDetachProgramOptions) error { return err } - attr := internal.BPFProgDetachAttr{ + attr := sys.ProgDetachAttr{ TargetFd: uint32(opts.Target), AttachBpfFd: uint32(opts.Program.FD()), AttachType: uint32(opts.Attach), } - if err := internal.BPFProgDetach(&attr); err != nil { + if err := sys.ProgDetach(&attr); err != nil { return fmt.Errorf("can't detach program: %w", err) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go index f4beb1e07863..925e621cbbc7 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -1,10 +1,11 @@ package link import ( + "errors" "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type RawTracepointOptions struct { @@ 
-22,40 +23,65 @@ func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) } if opts.Program.FD() < 0 { - return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } - fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{ - name: internal.NewStringPointer(opts.Name), - fd: uint32(opts.Program.FD()), + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + Name: sys.NewStringPointer(opts.Name), + ProgFd: uint32(opts.Program.FD()), }) if err != nil { return nil, err } - return &progAttachRawTracepoint{fd: fd}, nil + err = haveBPFLink() + if errors.Is(err, ErrNotSupported) { + // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") + // raw_tracepoints are just a plain fd. + return &simpleRawTracepoint{fd}, nil + } + + if err != nil { + return nil, err + } + + return &rawTracepoint{RawLink{fd: fd}}, nil } -type progAttachRawTracepoint struct { - fd *internal.FD +type simpleRawTracepoint struct { + fd *sys.FD } -var _ Link = (*progAttachRawTracepoint)(nil) +var _ Link = (*simpleRawTracepoint)(nil) -func (rt *progAttachRawTracepoint) isLink() {} +func (frt *simpleRawTracepoint) isLink() {} -func (rt *progAttachRawTracepoint) Close() error { - return rt.fd.Close() +func (frt *simpleRawTracepoint) Close() error { + return frt.fd.Close() } -func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error { - return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported) +func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) } -func (rt *progAttachRawTracepoint) Pin(_ string) error { - return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported) +func (frt *simpleRawTracepoint) Pin(string) error { + return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) } -func (rt *progAttachRawTracepoint) Unpin() error { +func (frt *simpleRawTracepoint) Unpin() error { return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) } + +func (frt *simpleRawTracepoint) Info() (*Info, error) { + return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) +} + +type rawTracepoint struct { + RawLink +} + +var _ Link = (*rawTracepoint)(nil) + +func (rt *rawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/socket_filter.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/socket_filter.go new file mode 100644 index 000000000000..94f3958cc4d8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -0,0 +1,40 @@ +package link + +import ( + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/unix" +) + +// AttachSocketFilter attaches a SocketFilter BPF program to a socket. +func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + }) + if ssoErr != nil { + return ssoErr + } + return err +} + +// DetachSocketFilter detaches a SocketFilter BPF program from a socket. 
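`AttachSocketFilter`/`DetachSocketFilter` (new in this diff) only need a `syscall.Conn`, which `*net.UDPConn` and friends already implement. A minimal end-to-end sketch with a keep-everything filter; the instruction sequence is illustrative:

```go
package main

import (
	"log"
	"net"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/link"
)

func main() {
	// A SocketFilter program returns the number of packet bytes to keep;
	// 0 drops the packet. This one keeps everything.
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type:    ebpf.SocketFilter,
		License: "MIT",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0xffff),
			asm.Return(),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()

	conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// *net.UDPConn implements syscall.Conn, which is all the new API needs.
	if err := link.AttachSocketFilter(conn, prog); err != nil {
		log.Fatal(err)
	}
	defer link.DetachSocketFilter(conn)
}
```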
+func DetachSocketFilter(conn syscall.Conn) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + }) + if ssoErr != nil { + return ssoErr + } + return err +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/syscalls.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/syscalls.go index a61499438b22..a661395b360b 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -2,35 +2,33 @@ package link import ( "errors" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // Type is the kind of link. -type Type uint32 +type Type = sys.LinkType // Valid link types. -// -// Equivalent to enum bpf_link_type. const ( - UnspecifiedType Type = iota - RawTracepointType - TracingType - CgroupType - IterType - NetNsType - XDPType + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP + PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT ) var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", + Type: ebpf.CGroupSKB, + License: "MIT", Instructions: asm.Instructions{ asm.Mov.Imm(asm.R0, 0), asm.Return(), @@ -69,7 +67,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't // present. - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ // We rely on this being checked after attachFlags. 
TargetFd: ^uint32(0), AttachBpfFd: uint32(prog.FD()), @@ -77,7 +75,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace AttachFlags: uint32(flagReplace), } - err = internal.BPFProgAttach(&attr) + err = sys.ProgAttach(&attr) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported } @@ -87,73 +85,14 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace return err }) -type bpfLinkCreateAttr struct { - progFd uint32 - targetFd uint32 - attachType ebpf.AttachType - flags uint32 - targetBTFID uint32 -} - -func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkCreateIterAttr struct { - prog_fd uint32 - target_fd uint32 - attach_type ebpf.AttachType - flags uint32 - iter_info internal.Pointer - iter_info_len uint32 -} - -func bpfLinkCreateIter(attr *bpfLinkCreateIterAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkUpdateAttr struct { - linkFd uint32 - newProgFd uint32 - flags uint32 - oldProgFd uint32 -} - -func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error { - _, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - }) - if err != nil { - return internal.ErrNotSupported - } - defer prog.Close() - - attr := bpfLinkCreateAttr{ + attr := sys.LinkCreateAttr{ // This is a hopefully invalid file descriptor, which triggers EBADF. 
- targetFd: ^uint32(0), - progFd: uint32(prog.FD()), - attachType: ebpf.AttachCGroupInetIngress, + TargetFd: ^uint32(0), + ProgFd: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), } - _, err = bpfLinkCreate(&attr) + _, err := sys.LinkCreate(&attr) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported } @@ -162,30 +101,3 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { } return err }) - -type bpfIterCreateAttr struct { - linkFd uint32 - flags uint32 -} - -func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} - -type bpfRawTracepointOpenAttr struct { - name internal.Pointer - fd uint32 - _ uint32 -} - -func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracepoint.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracepoint.go index 7423df86b137..a59ef9d1c527 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracepoint.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -6,12 +6,22 @@ import ( "github.com/cilium/ebpf" ) +// TracepointOptions defines additional parameters that will be used +// when loading Tracepoints. +type TracepointOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 +} + // Tracepoint attaches the given eBPF program to the tracepoint with the given // group and name. See /sys/kernel/debug/tracing/events to find available // tracepoints. The top-level directory is the group, the event's subdirectory // is the name. Example: // -// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog) +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) // // Losing the reference to the resulting Link (tp) will close the Tracepoint // and prevent further execution of prog. The Link must be Closed during @@ -19,14 +29,14 @@ import ( // // Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is // only possible as of kernel 4.14 (commit cf5f5ce). 
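`Tracepoint` gains the same trailing options parameter as the kprobe constructors; `nil` preserves the old behaviour. A sketch with a no-op program; the cookie value is illustrative and needs kernel 5.15+:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/link"
)

func main() {
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type:    ebpf.TracePoint,
		License: "MIT",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()

	// Fourth parameter is new; nil keeps the old behaviour.
	tp, err := link.Tracepoint("syscalls", "sys_enter_fork", prog, &link.TracepointOptions{
		Cookie: 42, // surfaced in the program via bpf_get_attach_cookie()
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tp.Close()
}
```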
-func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { +func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { if group == "" || name == "" { return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) } if prog == nil { return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) } - if !rgxTraceEvent.MatchString(group) || !rgxTraceEvent.MatchString(name) { + if !isValidTraceID(group) || !isValidTraceID(name) { return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput) } if prog.Type() != ebpf.TracePoint { @@ -43,18 +53,25 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { return nil, err } + var cookie uint64 + if opts != nil { + cookie = opts.Cookie + } + pe := &perfEvent{ - fd: fd, - tracefsID: tid, + typ: tracepointEvent, group: group, name: name, - typ: tracepointEvent, + tracefsID: tid, + cookie: cookie, + fd: fd, } - if err := pe.attach(prog); err != nil { + lnk, err := attachPerfEvent(pe, prog) + if err != nil { pe.Close() return nil, err } - return pe, nil + return lnk, nil } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracing.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracing.go new file mode 100644 index 000000000000..e47e61a3b843 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/tracing.go @@ -0,0 +1,141 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sys" +) + +type tracing struct { + RawLink +} + +func (f *tracing) Update(new *ebpf.Program) error { + return fmt.Errorf("tracing update: %w", ErrNotSupported) +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. 
+// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + btfHandle, err := targetProg.Handle() + if err != nil { + return nil, err + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return nil, err + } + + var function *btf.Func + if err := spec.TypeByName(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID, err = spec.TypeID(function) + if err != nil { + return nil, err + } + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if err != nil { + return nil, err + } + + return &tracing{*link}, nil +} + +type TracingOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or + // AttachTraceRawTp. + Program *ebpf.Program +} + +type LSMOptions struct { + // Program must be of type LSM with attach type + // AttachLSMMac. + Program *ebpf.Program +} + +// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id. +func attachBTFID(program *ebpf.Program) (Link, error) { + if program.FD() < 0 { + return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) + } + + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + ProgFd: uint32(program.FD()), + }) + if err != nil { + return nil, err + } + + raw := RawLink{fd: fd} + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + if info.Type == RawTracepointType { + // Sadness upon sadness: a Tracing program with AttachRawTp returns + // a raw_tracepoint link. Other types return a tracing link. + return &rawTracepoint{raw}, nil + } + + return &tracing{RawLink: RawLink{fd: fd}}, nil +} + +// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or +// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined +// in kernel modules. +func AttachTracing(opts TracingOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.Tracing { + return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) + } + + return attachBTFID(opts.Program) +} + +// AttachLSM links a Linux security module (LSM) BPF Program to a BPF +// hook defined in kernel modules. 
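`AttachTracing` (and `AttachLSM`, whose body follows below) take an already-loaded program whose BTF attach target was fixed at load time, so no symbol is passed here. A sketch, assuming a program compiled with an `fentry/...` section and loaded through the usual collection loader:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachFentry expects a program loaded as ebpf.Tracing (for example from
// an ELF section like SEC("fentry/tcp_connect")), so the BTF target is
// already recorded in the program itself.
func attachFentry(prog *ebpf.Program) (link.Link, error) {
	return link.AttachTracing(link.TracingOptions{Program: prog})
}

func main() { log.Println("sketch only; needs a loaded Tracing program") }
```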
+func AttachLSM(opts LSMOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.LSM { + return nil, fmt.Errorf("invalid program type %s, expected LSM", t) + } + + return attachBTFID(opts.Program) +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/uprobe.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/uprobe.go index 59170ce0468d..edf925b57020 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -6,7 +6,7 @@ import ( "fmt" "os" "path/filepath" - "regexp" + "strings" "sync" "github.com/cilium/ebpf" @@ -16,16 +16,23 @@ import ( var ( uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events") - // rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol - // as they are not allowed to be used as the EVENT token in tracefs. - rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+") - uprobeRetprobeBit = struct { once sync.Once value uint64 err error }{} + uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 + uprobeRefCtrOffsetShift = 32 + haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error { + _, err := os.Stat(uprobeRefCtrOffsetPMUPath) + if err != nil { + return internal.ErrNotSupported + } + return nil + }) + // ErrNoSymbol indicates that the given symbol was not found // in the ELF symbols table. ErrNoSymbol = errors.New("not found") @@ -35,24 +42,46 @@ var ( type Executable struct { // Path of the executable on the filesystem. path string - // Parsed ELF symbols and dynamic symbols offsets. - offsets map[string]uint64 + // Parsed ELF and dynamic symbols' addresses. + addresses map[string]uint64 } // UprobeOptions defines additional parameters that will be used // when loading Uprobes. type UprobeOptions struct { - // Symbol offset. Must be provided in case of external symbols (shared libs). - // If set, overrides the offset eventually parsed from the executable. + // Symbol address. Must be provided in case of external symbols (shared libs). + // If set, overrides the address eventually parsed from the executable. + Address uint64 + // The offset relative to given symbol. Useful when tracing an arbitrary point + // inside the frame of given symbol. + // + // Note: this field changed from being an absolute offset to being relative + // to Address. Offset uint64 // Only set the uprobe on the given process ID. Useful when tracing // shared library calls or programs that have many running instances. PID int + // Automatically manage SDT reference counts (semaphores). + // + // If this field is set, the Kernel will increment/decrement the + // semaphore located in the process memory at the provided address on + // probe attach/detach. + // + // See also: + // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling) + // github.com/torvalds/linux/commit/1cc33161a83d + // github.com/torvalds/linux/commit/a6ca88b241d5 + RefCtrOffset uint64 + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 } // To open a new Executable, use: // -// OpenExecutable("/bin/bash") +// OpenExecutable("/bin/bash") // // The returned value can then be used to open Uprobe(s). 
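Putting the new `UprobeOptions` together: a sketch attaching a uretprobe to bash's `readline`, echoing the doc-comment example above. The PID and cookie values are illustrative, and `prog` must be a Kprobe-type program:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachReadline(prog *ebpf.Program) (link.Link, error) {
	ex, err := link.OpenExecutable("/bin/bash")
	if err != nil {
		return nil, err
	}

	// All fields are optional: Address overrides symbol resolution, Offset
	// is now *relative* to the resolved address, and RefCtrOffset enables
	// kernel-managed SDT semaphores.
	return ex.Uretprobe("readline", prog, &link.UprobeOptions{
		PID:    1234, // only trace this process; illustrative
		Cookie: 7,    // kernel 5.15+, via bpf_get_attach_cookie()
	})
}

func main() { _ = attachReadline }
```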
func OpenExecutable(path string) (*Executable, error) { @@ -77,8 +106,8 @@ func OpenExecutable(path string) (*Executable, error) { } ex := Executable{ - path: path, - offsets: make(map[string]uint64), + path: path, + addresses: make(map[string]uint64), } if err := ex.load(se); err != nil { @@ -107,7 +136,7 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { continue } - off := s.Value + address := s.Value // Loop over ELF segments. for _, prog := range f.Progs { @@ -123,32 +152,42 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { // fn symbol offset = fn symbol VA - .text VA + .text offset // // stackoverflow.com/a/40249502 - off = s.Value - prog.Vaddr + prog.Off + address = s.Value - prog.Vaddr + prog.Off break } } - ex.offsets[s.Name] = off + ex.addresses[s.Name] = address } return nil } -func (ex *Executable) offset(symbol string) (uint64, error) { - if off, ok := ex.offsets[symbol]; ok { - // Symbols with location 0 from section undef are shared library calls and - // are relocated before the binary is executed. Dynamic linking is not - // implemented by the library, so mark this as unsupported for now. - // - // Since only offset values are stored and not elf.Symbol, if the value is 0, - // assume it's an external symbol. - if off == 0 { - return 0, fmt.Errorf("cannot resolve %s library call '%s', "+ - "consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported) - } - return off, nil +// address calculates the address of a symbol in the executable. +// +// opts must not be nil. +func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) { + if opts.Address > 0 { + return opts.Address + opts.Offset, nil } - return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + + address, ok := ex.addresses[symbol] + if !ok { + return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + } + + // Symbols with location 0 from section undef are shared library calls and + // are relocated before the binary is executed. Dynamic linking is not + // implemented by the library, so mark this as unsupported for now. + // + // Since only offset values are stored and not elf.Symbol, if the value is 0, + // assume it's an external symbol. + if address == 0 { + return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ + "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) + } + + return address + opts.Offset, nil } // Uprobe attaches the given eBPF program to a perf event that fires when the @@ -161,7 +200,9 @@ func (ex *Executable) offset(symbol string) (uint64, error) { // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. // // Losing the reference to the resulting Link (up) will close the Uprobe // and prevent further execution of prog. 
The Link must be Closed during @@ -175,13 +216,13 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti return nil, err } - err = u.attach(prog) + lnk, err := attachPerfEvent(u, prog) if err != nil { u.Close() return nil, err } - return u, nil + return lnk, nil } // Uretprobe attaches the given eBPF program to a perf event that fires right @@ -193,7 +234,9 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. // // Losing the reference to the resulting Link (up) will close the Uprobe // and prevent further execution of prog. The Link must be Closed during @@ -207,13 +250,13 @@ func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeO return nil, err } - err = u.attach(prog) + lnk, err := attachPerfEvent(u, prog) if err != nil { u.Close() return nil, err } - return u, nil + return lnk, nil } // uprobe opens a perf event for the given binary/symbol and attaches prog to it. @@ -225,25 +268,38 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti if prog.Type() != ebpf.Kprobe { return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) } + if opts == nil { + opts = &UprobeOptions{} + } - var offset uint64 - if opts != nil && opts.Offset != 0 { - offset = opts.Offset - } else { - off, err := ex.offset(symbol) - if err != nil { - return nil, err + offset, err := ex.address(symbol, opts) + if err != nil { + return nil, err + } + + pid := opts.PID + if pid == 0 { + pid = perfAllThreads + } + + if opts.RefCtrOffset != 0 { + if err := haveRefCtrOffsetPMU(); err != nil { + return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err) } - offset = off } - pid := perfAllThreads - if opts != nil && opts.PID != 0 { - pid = opts.PID + args := probeArgs{ + symbol: symbol, + path: ex.path, + offset: offset, + pid: pid, + refCtrOffset: opts.RefCtrOffset, + ret: ret, + cookie: opts.Cookie, } // Use uprobe PMU if the kernel has it available. - tp, err := pmuUprobe(symbol, ex.path, offset, pid, ret) + tp, err := pmuUprobe(args) if err == nil { return tp, nil } @@ -252,7 +308,8 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // Use tracefs if uprobe PMU is missing. - tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, pid, ret) + args.symbol = sanitizeSymbol(symbol) + tp, err = tracefsUprobe(args) if err != nil { return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) } @@ -261,23 +318,51 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // pmuUprobe opens a perf event based on the uprobe PMU. -func pmuUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { - return pmuProbe(uprobeType, symbol, path, offset, pid, ret) +func pmuUprobe(args probeArgs) (*perfEvent, error) { + return pmuProbe(uprobeType, args) } // tracefsUprobe creates a Uprobe tracefs entry. 
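From the caller's side the PMU-first, tracefs-fallback logic above is invisible; attaching remains a two-step affair. A hedged sketch follows (the object file and program name are invented, the upstream import path is assumed, and root or CAP_BPF is required to run it):

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func main() {
	// probes.o and the program name are placeholders for a compiled
	// BPF object containing a uprobe (Kprobe-type) program.
	coll, err := ebpf.LoadCollection("probes.o")
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	prog := coll.Programs["readline_probe"]
	if prog == nil {
		log.Fatal("program not found in object")
	}

	ex, err := link.OpenExecutable("/bin/bash")
	if err != nil {
		log.Fatal(err)
	}

	// nil options: the address is resolved from bash's ELF symbols;
	// internally uprobe() tries the PMU first, then tracefs.
	up, err := ex.Uprobe("readline", prog, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer up.Close()
}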
-func tracefsUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { - return tracefsProbe(uprobeType, symbol, path, offset, pid, ret) +func tracefsUprobe(args probeArgs) (*perfEvent, error) { + return tracefsProbe(uprobeType, args) } -// uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore. -func uprobeSanitizedSymbol(symbol string) string { - return rgxUprobeSymbol.ReplaceAllString(symbol, "_") +// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore. +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(s, "_"). +func sanitizeSymbol(s string) string { + var b strings.Builder + b.Grow(len(s)) + var skip bool + for _, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + b.WriteByte(c) + + default: + if !skip { + b.WriteByte('_') + skip = true + } + } + } + + return b.String() } -// uprobePathOffset creates the PATH:OFFSET token for the tracefs api. -func uprobePathOffset(path string, offset uint64) string { - return fmt.Sprintf("%s:%#x", path, offset) +// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func uprobeToken(args probeArgs) string { + po := fmt.Sprintf("%s:%#x", args.path, args.offset) + + if args.refCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.refCtrOffset) + } + + return po } func uretprobeBit() (uint64, error) { diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/xdp.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/xdp.go new file mode 100644 index 000000000000..aa8dd3a4cb39 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/link/xdp.go @@ -0,0 +1,54 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" +) + +// XDPAttachFlags represents how an XDP program will be attached to an interface. +type XDPAttachFlags uint32 + +const ( + // XDPGenericMode (SKB) links an XDP BPF program for drivers which do + // not yet support native XDP. + XDPGenericMode XDPAttachFlags = 1 << (iota + 1) + // XDPDriverMode links an XDP BPF program into the driver’s receive path. + XDPDriverMode + // XDPOffloadMode offloads the entire XDP BPF program into hardware. + XDPOffloadMode +) + +type XDPOptions struct { + // Program must be an XDP BPF program. + Program *ebpf.Program + + // Interface is the interface index to attach the program to. + Interface int + + // Flags is one of XDPAttachFlags (optional). + // + // Only one XDP mode should be set; if no flag is set, the kernel + // defaults to driver/generic mode (best effort). + Flags XDPAttachFlags +} + +// AttachXDP links an XDP BPF program to an XDP hook.
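The strings.Builder rewrite above drops the package's only regexp dependency while collapsing each run of invalid bytes into a single underscore, exactly like the pattern it replaces. A standalone check of that equivalence (test inputs are arbitrary):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var invalid = regexp.MustCompile("[^a-zA-Z0-9]+")

// sanitize mirrors the new strings.Builder implementation.
func sanitize(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	var skip bool
	for _, c := range []byte(s) {
		switch {
		case c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z', c >= '0' && c <= '9':
			skip = false
			b.WriteByte(c)
		default:
			if !skip {
				b.WriteByte('_') // one underscore per run of invalid bytes
				skip = true
			}
		}
	}
	return b.String()
}

func main() {
	for _, s := range []string{"go.runtime.newobject", "c++filt@plt", "plain"} {
		fmt.Println(sanitize(s) == invalid.ReplaceAllString(s, "_"), sanitize(s))
	}
}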
+func AttachXDP(opts XDPOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.XDP { + return nil, fmt.Errorf("invalid program type %s, expected XDP", t) + } + + if opts.Interface < 1 { + return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) + } + + rawLink, err := AttachRawLink(RawLinkOptions{ + Program: opts.Program, + Attach: ebpf.AttachXDP, + Target: opts.Interface, + Flags: uint32(opts.Flags), + }) + + return rawLink, err +} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go index f3b1629e70a8..e6276b1829b2 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/linker.go @@ -1,159 +1,238 @@ package ebpf import ( + "errors" "fmt" + "sync" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" ) -// link resolves bpf-to-bpf calls. +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. // -// Each library may contain multiple functions / labels, and is only linked -// if prog references one of these functions. -// -// Libraries also linked. -func link(prog *ProgramSpec, libs []*ProgramSpec) error { - var ( - linked = make(map[*ProgramSpec]bool) - pending = []asm.Instructions{prog.Instructions} - insns asm.Instructions - ) - for len(pending) > 0 { - insns, pending = pending[0], pending[1:] - for _, lib := range libs { - if linked[lib] { - continue - } +// The resulting map is indexed by Symbol name. +func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } - needed, err := needSection(insns, lib.Instructions) - if err != nil { - return fmt.Errorf("linking %s: %w", lib.Name, err) - } + if insns[0].Symbol() == "" { + return nil, errors.New("insns must start with a Symbol") + } - if !needed { - continue + var name string + progs := make(map[string]asm.Instructions) + for _, ins := range insns { + if sym := ins.Symbol(); sym != "" { + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) } + name = sym + } - linked[lib] = true - prog.Instructions = append(prog.Instructions, lib.Instructions...) - pending = append(pending, lib.Instructions) + progs[name] = append(progs[name], ins) + } - if prog.BTF != nil && lib.BTF != nil { - if err := prog.BTF.Append(lib.BTF); err != nil { - return fmt.Errorf("linking BTF of %s: %w", lib.Name, err) - } - } + return progs, nil +} + +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. +// +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. +// +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. 
+func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true } } - - return nil + return false } -func needSection(insns, section asm.Instructions) (bool, error) { - // A map of symbols to the libraries which contain them. - symbols, err := section.SymbolOffsets() +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// Passing a nil target will relocate against the running kernel. insns are +// modified in place. +func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } + + if len(relos) == 0 { + return nil + } + + target, err := maybeLoadKernelBTF(target) if err != nil { - return false, err + return err } - for _, ins := range insns { - if ins.Reference == "" { - continue - } + fixups, err := btf.CORERelocate(local, target, relos) + if err != nil { + return err + } - if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall { - continue + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("apply fixup %s: %w", &fixup, err) } + } - if ins.Constant != -1 { - // This is already a valid call, no need to link again. - continue - } + return nil +} - if _, ok := symbols[ins.Reference]; !ok { - // Symbol isn't available in this section - continue - } +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. + refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() + } - // At this point we know that at least one function in the - // library is called from insns, so we have to link it. - return true, nil + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) } - // None of the functions in the section are called. - return false, nil + // Finally, assign the flattened instructions. + for i, name := range names { + progs[name].Instructions = flattened[i] + } } -func fixupJumpsAndCalls(insns asm.Instructions) error { - symbolOffsets := make(map[string]asm.RawInstructionOffset) - iter := insns.Iterate() - for iter.Next() { - ins := iter.Ins +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. +// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] + + insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. + pending := make([]string, len(refs[prog])) + copy(pending, refs[prog]) + + // All references for which we've appended instructions. 
+ linked := make(map[string]bool) + + // Iterate all pending references. We can't use a range since pending is + // modified in the body below. + for len(pending) > 0 { + var ref string + ref, pending = pending[0], pending[1:] - if ins.Symbol == "" { + if linked[ref] { + // We've already linked this ref, don't append instructions again. continue } - if _, ok := symbolOffsets[ins.Symbol]; ok { - return fmt.Errorf("duplicate symbol %s", ins.Symbol) + progRef := progs[ref] + if progRef == nil { + // We don't have instructions that go with this reference. This + // happens when calling extern functions. + continue } - symbolOffsets[ins.Symbol] = iter.Offset + insns = append(insns, progRef.Instructions...) + linked[ref] = true + + // Make sure we link indirect references. + pending = append(pending, refs[progRef]...) } - iter = insns.Iterate() + return insns +} + +// fixupAndValidate is called by the ELF reader right before marshaling the +// instruction stream. It performs last-minute adjustments to the program and +// runs some sanity checks before sending it off to the kernel. +func fixupAndValidate(insns asm.Instructions) error { + iter := insns.Iterate() for iter.Next() { - i := iter.Index - offset := iter.Offset ins := iter.Ins - if ins.Reference == "" { - continue + // Map load was tagged with a Reference, but does not contain a Map pointer. + if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil { + return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference) } - switch { - case ins.IsFunctionCall() && ins.Constant == -1: - // Rewrite bpf to bpf call - callOffset, ok := symbolOffsets[ins.Reference] - if !ok { - return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference) - } + fixupProbeReadKernel(ins) + } - ins.Constant = int64(callOffset - offset - 1) + return nil +} - case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1: - // Rewrite jump to label - jumpOffset, ok := symbolOffsets[ins.Reference] - if !ok { - return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference) - } +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. +func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } - ins.Offset = int16(jumpOffset - offset - 1) + // Kernel supports bpf_probe_read_kernel, nothing to do. 
+ if haveProbeReadKernel() == nil { + return + } - case ins.IsLoadFromMap() && ins.MapPtr() == -1: - return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference) - } + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) } +} - // fixupBPFCalls replaces bpf_probe_read_{kernel,user}[_str] with bpf_probe_read[_str] on older kernels - // https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L6009 - iter = insns.Iterate() - for iter.Next() { - ins := iter.Ins - if !ins.IsBuiltinCall() { - continue - } - switch asm.BuiltinFunc(ins.Constant) { - case asm.FnProbeReadKernel, asm.FnProbeReadUser: - if err := haveProbeReadKernel(); err != nil { - ins.Constant = int64(asm.FnProbeRead) - } - case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: - if err := haveProbeReadKernel(); err != nil { - ins.Constant = int64(asm.FnProbeReadStr) - } - } +var kernelBTF struct { + sync.Mutex + spec *btf.Spec +} + +// maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise +// it returns spec unchanged. +// +// The kernel BTF is cached for the lifetime of the process. +func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) { + if spec != nil { + return spec, nil } - return nil + kernelBTF.Lock() + defer kernelBTF.Unlock() + + if kernelBTF.spec != nil { + return kernelBTF.spec, nil + } + + var err error + kernelBTF.spec, err = btf.LoadKernelSpec() + return kernelBTF.spec, err } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go index cca387ead019..e4a6c87e924b 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/map.go @@ -5,12 +5,15 @@ import ( "errors" "fmt" "io" + "math/rand" "path/filepath" "reflect" - "strings" + "time" + "unsafe" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -19,7 +22,8 @@ var ( ErrKeyNotExist = errors.New("key does not exist") ErrKeyExist = errors.New("key already exists") ErrIterationAborted = errors.New("iteration aborted") - ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") ) // MapOptions control loading a map into the kernel. @@ -67,12 +71,15 @@ type MapSpec struct { InnerMap *MapSpec // Extra trailing bytes found in the ELF map definition when using structs - // larger than libbpf's bpf_map_def. Must be empty before instantiating - // the MapSpec into a Map. - Extra bytes.Reader + // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader + + // The key and value type of this map. May be nil. + Key, Value btf.Type // The BTF associated with this map. - BTF *btf.Map + BTF *btf.Spec } func (ms *MapSpec) String() string { @@ -97,6 +104,12 @@ func (ms *MapSpec) Copy() *MapSpec { return &cpy } +// hasBTF returns true if the MapSpec has a valid BTF spec and if its +// map type supports associated BTF metadata in the kernel. 
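A MapSpec built by hand simply leaves the new Key/Value/BTF fields nil; only specs parsed from an ELF carry type information, and hasBTF() below reports false otherwise. A minimal sketch (upstream import path assumed; the rlimit call mirrors the hint in the map-create error messages further down, relevant on pre-5.11 kernels):

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// On kernels before 5.11, BPF objects count against RLIMIT_MEMLOCK;
	// this is the remedy the error messages in this file suggest.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatal(err)
	}

	// Key, Value and BTF stay nil: a hand-written spec carries no
	// type information.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Name:       "example_hash",
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 128,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}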
+func (ms *MapSpec) hasBTF() bool { + return ms.BTF != nil && ms.Type.hasBTF() +} + func (ms *MapSpec) clampPerfEventArraySize() error { if ms.Type != PerfEventArray { return nil @@ -114,6 +127,31 @@ func (ms *MapSpec) clampPerfEventArraySize() error { return nil } +// dataSection returns the contents and BTF Datasec descriptor of the spec. +func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + + if ms.Value == nil { + return nil, nil, errMapNoBTFValue + } + + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + } + + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + // MapKV is used to initialize the contents of a Map. type MapKV struct { Key interface{} @@ -131,7 +169,8 @@ func (ms *MapSpec) checkCompatibility(m *Map) error { case m.valueSize != ms.ValueSize: return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible) - case m.maxEntries != ms.MaxEntries: + case !(ms.Type == PerfEventArray && ms.MaxEntries == 0) && + m.maxEntries != ms.MaxEntries: return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible) case m.flags != ms.Flags: @@ -151,7 +190,7 @@ func (ms *MapSpec) checkCompatibility(m *Map) error { // if you require custom encoding. type Map struct { name string - fd *internal.FD + fd *sys.FD typ MapType keySize uint32 valueSize uint32 @@ -166,18 +205,19 @@ type Map struct { // // You should not use fd after calling this function. func NewMapFromFD(fd int) (*Map, error) { - if fd < 0 { - return nil, errors.New("invalid fd") + f, err := sys.NewFD(fd) + if err != nil { + return nil, err } - return newMapFromFD(internal.NewFD(uint32(fd))) + return newMapFromFD(f) } -func newMapFromFD(fd *internal.FD) (*Map, error) { +func newMapFromFD(fd *sys.FD) (*Map, error) { info, err := newMapInfoFromFd(fd) if err != nil { fd.Close() - return nil, fmt.Errorf("get map info: %s", err) + return nil, fmt.Errorf("get map info: %w", err) } return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) @@ -209,8 +249,8 @@ func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { return nil, fmt.Errorf("creating map: %w", err) } - err = m.finalize(spec) - if err != nil { + if err := m.finalize(spec); err != nil { + m.Close() return nil, fmt.Errorf("populating map: %w", err) } @@ -257,7 +297,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) } - var innerFd *internal.FD + var innerFd *sys.FD if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { if spec.InnerMap == nil { return nil, fmt.Errorf("%s requires InnerMap", spec.Type) @@ -288,7 +328,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ if spec.Pinning == PinByName { path := filepath.Join(opts.PinPath, spec.Name) if err := m.Pin(path); err != nil { - return nil, fmt.Errorf("pin map: %s", err) + return nil, fmt.Errorf("pin map: %w", err) } } @@ -297,7 +337,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ // createMap validates the spec's properties and creates the map in the kernel // using the given opts. 
It does not populate or freeze the map. -func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { +func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { closeOnError := func(closer io.Closer) { if err != nil { closer.Close() @@ -310,8 +350,10 @@ func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *han // additional 'inner_map_idx' and later 'numa_node' fields. // In order to support loading these definitions, tolerate the presence of // extra bytes, but require them to be zeroes. - if _, err := io.Copy(internal.DiscardZeroes{}, &spec.Extra); err != nil { - return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } } switch spec.Type { @@ -360,51 +402,63 @@ func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *han return nil, fmt.Errorf("map create: %w", err) } } + if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + if err := haveNoPreallocMaps(); err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + } - attr := internal.BPFMapCreateAttr{ - MapType: uint32(spec.Type), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(spec.Type), KeySize: spec.KeySize, ValueSize: spec.ValueSize, MaxEntries: spec.MaxEntries, - Flags: spec.Flags, + MapFlags: spec.Flags, NumaNode: spec.NumaNode, } if inner != nil { - var err error - attr.InnerMapFd, err = inner.Value() - if err != nil { - return nil, fmt.Errorf("map create: %w", err) - } + attr.InnerMapFd = inner.Uint() } if haveObjName() == nil { - attr.MapName = internal.NewBPFObjName(spec.Name) + attr.MapName = sys.NewObjName(spec.Name) } - var btfDisabled bool - if spec.BTF != nil { - handle, err := handles.btfHandle(spec.BTF.Spec) - btfDisabled = errors.Is(err, btf.ErrNotSupported) - if err != nil && !btfDisabled { + if spec.hasBTF() { + handle, err := handles.btfHandle(spec.BTF) + if err != nil && !errors.Is(err, btf.ErrNotSupported) { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { - attr.BTFFd = uint32(handle.FD()) - attr.BTFKeyTypeID = uint32(spec.BTF.Key.ID()) - attr.BTFValueTypeID = uint32(spec.BTF.Value.ID()) + keyTypeID, err := spec.BTF.TypeID(spec.Key) + if err != nil { + return nil, err + } + + valueTypeID, err := spec.BTF.TypeID(spec.Value) + if err != nil { + return nil, err + } + + attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = uint32(keyTypeID) + attr.BtfValueTypeId = uint32(valueTypeID) } } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { if errors.Is(err, unix.EPERM) { - return nil, fmt.Errorf("map create: %w (MEMLOCK bay be too low, consider rlimit.RemoveMemlock)", err) + return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) } - if btfDisabled { + if !spec.hasBTF() { return nil, fmt.Errorf("map create without BTF: %w", err) } + if errors.Is(err, unix.EINVAL) && attr.MaxEntries == 0 { + return nil, fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } return nil, fmt.Errorf("map create: %w", err) } defer closeOnError(fd) @@ -419,7 +473,7 @@ func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *han // newMap allocates and returns a new Map structure. 
// Sets the fullValueSize on per-CPU maps. -func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { +func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { m := &Map{ name, fd, @@ -482,6 +536,12 @@ func (m *Map) Info() (*MapInfo, error) { return newMapInfoFromFd(m.fd) } +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock looks up the value of a spin-locked map. +const LookupLock MapLookupFlags = 4 + // Lookup retrieves a value from a Map. // // Calls Close() on valueOut if it is of type **Map or **Program, @@ -490,39 +550,58 @@ func (m *Map) Info() (*MapInfo, error) { // Returns an error if the key doesn't exist, see ErrKeyNotExist. func (m *Map) Lookup(key, valueOut interface{}) error { valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - if err := m.lookup(key, valuePtr); err != nil { + if err := m.lookup(key, valuePtr, 0); err != nil { return err } return m.unmarshalValue(valueOut, valueBytes) } -// LookupAndDelete retrieves and deletes a value from a Map. +// LookupWithFlags retrieves a value from a Map with flags. // -// Returns ErrKeyNotExist if the key doesn't exist. -func (m *Map) LookupAndDelete(key, valueOut interface{}) error { +// Passing the LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil { - return fmt.Errorf("lookup and delete failed: %w", err) + if err := m.lookup(key, valuePtr, flags); err != nil { + return err } return m.unmarshalValue(valueOut, valueBytes) } +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.lookupAndDelete(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing the LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + return m.lookupAndDelete(key, valueOut, flags) +} + // LookupBytes gets a value from Map. // // Returns a nil value if a key doesn't exist.
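For contrast, a plain lookup round-trip; LookupWithFlags with LookupLock is only needed when the value type embeds a bpf_spin_lock. Sketch, upstream import path assumed:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type: ebpf.Hash, KeySize: 4, ValueSize: 8, MaxEntries: 8,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	if err := m.Put(uint32(1), uint64(100)); err != nil {
		log.Fatal(err)
	}

	var v uint64
	switch err := m.Lookup(uint32(1), &v); {
	case errors.Is(err, ebpf.ErrKeyNotExist):
		fmt.Println("missing")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("value:", v) // value: 100
	}
}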
func (m *Map) LookupBytes(key interface{}) ([]byte, error) { valueBytes := make([]byte, m.fullValueSize) - valuePtr := internal.NewSlicePointer(valueBytes) + valuePtr := sys.NewSlicePointer(valueBytes) - err := m.lookup(key, valuePtr) + err := m.lookup(key, valuePtr, 0) if errors.Is(err, ErrKeyNotExist) { return nil, nil } @@ -530,18 +609,47 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) { return valueBytes, err } -func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error { +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { keyPtr, err := m.marshalKey(key) if err != nil { return fmt.Errorf("can't marshal key: %w", err) } - if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil { - return fmt.Errorf("lookup failed: %w", err) + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + return fmt.Errorf("lookup: %w", wrapMapError(err)) } return nil } +func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error { + valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) + + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return m.unmarshalValue(valueOut, valueBytes) +} + // MapUpdateFlags controls the behaviour of the Map.Update call. // // The exact semantics depend on the specific MapType. @@ -554,6 +662,8 @@ const ( UpdateNoExist MapUpdateFlags = 1 << (iota - 1) // UpdateExist updates an existing element. UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock ) // Put replaces or creates a value in map. @@ -575,8 +685,15 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error { return fmt.Errorf("can't marshal value: %w", err) } - if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil { - return fmt.Errorf("update failed: %w", err) + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) } return nil @@ -591,8 +708,13 @@ func (m *Map) Delete(key interface{}) error { return fmt.Errorf("can't marshal key: %w", err) } - if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil { - return fmt.Errorf("delete failed: %w", err) + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) } return nil } @@ -624,7 +746,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error { // Returns nil if there are no more keys. 
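The create-versus-overwrite semantics of the update flags, shown end to end (UpdateLock, like LookupLock, only applies to spin-locked values and is omitted here; upstream import path assumed):

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type: ebpf.Hash, KeySize: 4, ValueSize: 4, MaxEntries: 4,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	key, val := uint32(7), uint32(1)

	// UpdateNoExist fails with ErrKeyExist if the key is already present.
	if err := m.Update(key, val, ebpf.UpdateNoExist); err != nil {
		log.Fatal(err)
	}
	err = m.Update(key, uint32(2), ebpf.UpdateNoExist)
	fmt.Println(errors.Is(err, ebpf.ErrKeyExist)) // true

	// UpdateExist fails with ErrKeyNotExist for absent keys.
	err = m.Update(uint32(8), val, ebpf.UpdateExist)
	fmt.Println(errors.Is(err, ebpf.ErrKeyNotExist)) // true
}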
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { nextKey := make([]byte, m.keySize) - nextKeyPtr := internal.NewSlicePointer(nextKey) + nextKeyPtr := sys.NewSlicePointer(nextKey) err := m.nextKey(key, nextKeyPtr) if errors.Is(err, ErrKeyNotExist) { @@ -634,9 +756,9 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { return nextKey, err } -func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { var ( - keyPtr internal.Pointer + keyPtr sys.Pointer err error ) @@ -647,12 +769,77 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { } } - if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil { - return fmt.Errorf("next key failed: %w", err) + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, + } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. + if key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte + guessKey, err = m.guessNonExistentKey() + if err != nil { + return err + } + + // Retry the syscall with a valid non-existing key. + attr.Key = sys.NewSlicePointer(guessKey) + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) } + return nil } +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Provide an invalid value pointer to prevent a copy on the kernel side. + valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0))) + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. + case 3: + rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey) + } + + err := m.lookup(randKey, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return randKey, nil + } + } + + return nil, errors.New("couldn't find non-existing key") +} + // BatchLookup looks up many elements in a map at once. // // "keysOut" and "valuesOut" must be of type slice, a pointer @@ -664,7 +851,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". 
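Callers rarely drive NextKey by hand; MapIterator does it for them, including the guessNonExistentKey fallback above on pre-4.4.132 kernels. A sketch, upstream import path assumed:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type: ebpf.Hash, KeySize: 4, ValueSize: 4, MaxEntries: 8,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	for i := uint32(0); i < 3; i++ {
		if err := m.Put(i, i*10); err != nil {
			log.Fatal(err)
		}
	}

	// MapIterator calls NextKey and Lookup under the hood; iteration
	// order over a hash map is unspecified.
	var k, v uint32
	iter := m.Iterate()
	for iter.Next(&k, &v) {
		fmt.Println(k, v)
	}
	if err := iter.Err(); err != nil {
		log.Fatal(err)
	}
}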
func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) + return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) } // BatchLookupAndDelete looks up many elements in a map at once, @@ -679,10 +866,10 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) + return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) } -func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { +func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { if err := haveBatchAPI(); err != nil { return 0, err } @@ -702,29 +889,36 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va return 0, fmt.Errorf("keysOut and valuesOut must be the same length") } keyBuf := make([]byte, count*int(m.keySize)) - keyPtr := internal.NewSlicePointer(keyBuf) + keyPtr := sys.NewSlicePointer(keyBuf) valueBuf := make([]byte, count*int(m.fullValueSize)) - valuePtr := internal.NewSlicePointer(valueBuf) + valuePtr := sys.NewSlicePointer(valueBuf) + nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - var ( - startPtr internal.Pointer - err error - retErr error - ) + attr := sys.MapLookupBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + OutBatch: nextPtr, + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + var err error if startKey != nil { - startPtr, err = marshalPtr(startKey, int(m.keySize)) + attr.InBatch, err = marshalPtr(startKey, int(m.keySize)) if err != nil { return 0, err } } - nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - ct, err := bpfMapBatch(cmd, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts) - if err != nil { - if !errors.Is(err, ErrKeyNotExist) { - return 0, err - } - retErr = ErrKeyNotExist + _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + sysErr = wrapMapError(sysErr) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr } err = m.unmarshalKey(nextKeyOut, nextBuf) @@ -737,9 +931,10 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va } err = unmarshalBytes(valuesOut, valueBuf) if err != nil { - retErr = err + return 0, err } - return int(ct), retErr + + return int(attr.Count), sysErr } // BatchUpdate updates the map with multiple keys and values @@ -763,7 +958,7 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er } var ( count = keysValue.Len() - valuePtr internal.Pointer + valuePtr sys.Pointer err error ) if count != valuesValue.Len() { @@ -777,9 +972,24 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er if err != nil { return 0, err } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, 
valuePtr, uint32(count), opts) - return int(ct), err + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil } // BatchDelete batch deletes entries in the map by keys. @@ -800,9 +1010,23 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { if err != nil { return 0, fmt.Errorf("cannot marshal keys: %v", err) } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts) - return int(ct), err + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil } // Iterate traverses a map. @@ -815,7 +1039,8 @@ func (m *Map) Iterate() *MapIterator { return newMapIterator(m) } -// Close removes a Map +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. func (m *Map) Close() error { if m == nil { // This makes it easier to clean up when iterating maps @@ -830,14 +1055,7 @@ func (m *Map) Close() error { // // Calling this function is invalid after Close has been called. func (m *Map) FD() int { - fd, err := m.fd.Value() - if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 - } - - return int(fd) + return m.fd.Int() } // Clone creates a duplicate of the Map. @@ -912,7 +1130,11 @@ func (m *Map) Freeze() error { return fmt.Errorf("can't freeze map: %w", err) } - if err := bpfMapFreeze(m.fd); err != nil { + attr := sys.MapFreezeAttr{ + MapFd: m.fd.Uint(), + } + + if err := sys.MapFreeze(&attr); err != nil { return fmt.Errorf("can't freeze map: %w", err) } return nil @@ -936,13 +1158,13 @@ func (m *Map) finalize(spec *MapSpec) error { return nil } -func (m *Map) marshalKey(data interface{}) (internal.Pointer, error) { +func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { if data == nil { if m.keySize == 0 { // Queues have a key length of zero, so passing nil here is valid. 
- return internal.NewPointer(nil), nil + return sys.NewPointer(nil), nil } - return internal.Pointer{}, errors.New("can't use nil as key of map") + return sys.Pointer{}, errors.New("can't use nil as key of map") } return marshalPtr(data, int(m.keySize)) @@ -957,7 +1179,7 @@ func (m *Map) unmarshalKey(data interface{}, buf []byte) error { return unmarshalBytes(data, buf) } -func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { if m.typ.hasPerCPUValue() { return marshalPerCPUValue(data, int(m.valueSize)) } @@ -970,13 +1192,13 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { switch value := data.(type) { case *Map: if !m.typ.canStoreMap() { - return internal.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) } buf, err = marshalMap(value, int(m.valueSize)) case *Program: if !m.typ.canStoreProgram() { - return internal.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) } buf, err = marshalProgram(value, int(m.valueSize)) @@ -985,10 +1207,10 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { } if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } func (m *Map) unmarshalValue(value interface{}, buf []byte) error { @@ -1052,7 +1274,10 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error { // LoadPinnedMap loads a Map from a BPF file. func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, err } @@ -1081,70 +1306,11 @@ func marshalMap(m *Map, length int) ([]byte, error) { return nil, fmt.Errorf("can't marshal map to %d bytes", length) } - fd, err := m.fd.Value() - if err != nil { - return nil, err - } - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, fd) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) return buf, nil } -func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error { - replaced := make(map[string]bool) - replace := func(name string, offset, size int, replacement interface{}) error { - if offset+size > len(value) { - return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size) - } - - buf, err := marshalBytes(replacement, size) - if err != nil { - return fmt.Errorf("marshal %s: %w", name, err) - } - - copy(value[offset:offset+size], buf) - replaced[name] = true - return nil - } - - switch parent := typ.(type) { - case *btf.Datasec: - for _, secinfo := range parent.Vars { - name := string(secinfo.Type.(*btf.Var).Name) - replacement, ok := replacements[name] - if !ok { - continue - } - - err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement) - if err != nil { - return err - } - } - - default: - return fmt.Errorf("patching %T is not supported", typ) - } - - if len(replaced) == len(replacements) { - return nil - } - - var missing []string - for name := range replacements { - if !replaced[name] { - missing = append(missing, name) - } - } - - if len(missing) == 1 { - return fmt.Errorf("unknown field: %s", missing[0]) - } - - return fmt.Errorf("unknown fields: %s", strings.Join(missing, 
",")) -} - // MapIterator iterates a Map. // // See Map.Iterate. @@ -1239,29 +1405,20 @@ func (mi *MapIterator) Err() error { // // Returns ErrNotExist, if there is no next eBPF map. func MapGetNextID(startID MapID) (MapID, error) { - id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID)) - return MapID(id), err + attr := &sys.MapGetNextIdAttr{Id: uint32(startID)} + return MapID(attr.NextId), sys.MapGetNextId(attr) } // NewMapFromID returns the map for a given id. // // Returns ErrNotExist, if there is no eBPF map with the given id. func NewMapFromID(id MapID) (*Map, error) { - fd, err := internal.BPFObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id)) + fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, err } return newMapFromFD(fd) } - -// ID returns the systemwide unique ID of the map. -// -// Deprecated: use MapInfo.ID() instead. -func (m *Map) ID() (MapID, error) { - info, err := bpfGetMapInfoByFD(m.fd) - if err != nil { - return MapID(0), err - } - return MapID(info.id), nil -} diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go index e461d673d701..544d17f35e1a 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/marshalers.go @@ -12,6 +12,7 @@ import ( "unsafe" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) // marshalPtr converts an arbitrary value into a pointer suitable @@ -19,17 +20,17 @@ import ( // // As an optimization, it returns the original value if it is an // unsafe.Pointer. -func marshalPtr(data interface{}, length int) (internal.Pointer, error) { +func marshalPtr(data interface{}, length int) (sys.Pointer, error) { if ptr, ok := data.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil + return sys.NewPointer(ptr), nil } buf, err := marshalBytes(data, length) if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } // marshalBytes converts an arbitrary value into a byte buffer. @@ -73,13 +74,13 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) { return buf, nil } -func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) { +func makeBuffer(dst interface{}, length int) (sys.Pointer, []byte) { if ptr, ok := dst.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil + return sys.NewPointer(ptr), nil } buf := make([]byte, length) - return internal.NewSlicePointer(buf), buf + return sys.NewSlicePointer(buf), buf } var bytesReaderPool = sync.Pool{ @@ -98,14 +99,7 @@ var bytesReaderPool = sync.Pool{ func unmarshalBytes(data interface{}, buf []byte) error { switch value := data.(type) { case unsafe.Pointer: - var dst []byte - // Use unsafe.Slice when we drop support for pre1.17 (https://github.com/golang/go/issues/19367) - // We could opt for removing unsafe.Pointer support in the lib as well - sh := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - sh.Data = uintptr(value) - sh.Len = len(buf) - sh.Cap = len(buf) - + dst := unsafe.Slice((*byte)(value), len(buf)) copy(dst, buf) runtime.KeepAlive(value) return nil @@ -164,21 +158,21 @@ func unmarshalBytes(data interface{}, buf []byte) error { // Values are initialized to zero if the slice has less elements than CPUs. 
// // slice must have a type like []elementType. -func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) { +func marshalPerCPUValue(slice interface{}, elemLength int) (sys.Pointer, error) { sliceType := reflect.TypeOf(slice) if sliceType.Kind() != reflect.Slice { - return internal.Pointer{}, errors.New("per-CPU value requires slice") + return sys.Pointer{}, errors.New("per-CPU value requires slice") } possibleCPUs, err := internal.PossibleCPUs() if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } sliceValue := reflect.ValueOf(slice) sliceLen := sliceValue.Len() if sliceLen > possibleCPUs { - return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") + return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") } alignedElemLength := internal.Align(elemLength, 8) @@ -188,14 +182,14 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er elem := sliceValue.Index(i).Interface() elemBytes, err := marshalBytes(elem, elemLength) if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } offset := i * alignedElemLength copy(buf[offset:offset+elemLength], elemBytes) } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } // unmarshalPerCPUValue decodes a buffer into a slice containing one value per diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go index 3549a3fe3f0c..675edc711d74 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/prog.go @@ -5,23 +5,22 @@ import ( "encoding/binary" "errors" "fmt" - "io" "math" "path/filepath" + "runtime" "strings" "time" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // ErrNotSupported is returned whenever the kernel doesn't support a feature. var ErrNotSupported = internal.ErrNotSupported -var errUnsatisfiedReference = errors.New("unsatisfied reference") - // ProgramID represents the unique ID of an eBPF program. type ProgramID uint32 @@ -44,12 +43,13 @@ type ProgramOptions struct { // Controls the output buffer size for the verifier. Defaults to // DefaultVerifierLogSize. LogSize int - // An ELF containing the target BTF for this program. It is used both to - // find the correct function to trace and to apply CO-RE relocations. + // Type information used for CO-RE relocations and when attaching to + // kernel functions. + // // This is useful in environments where the kernel BTF is not available // (containers) or where it is in a non-standard location. Defaults to - // use the kernel BTF from a well-known location. - TargetBTF io.ReaderAt + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec } // ProgramSpec defines a Program. @@ -59,13 +59,24 @@ type ProgramSpec struct { Name string // Type determines at which hook in the kernel a program will run. - Type ProgramType + Type ProgramType + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. AttachType AttachType + // Name of a kernel data structure or function to attach to. 
Its // interpretation depends on Type and AttachType. AttachTo string + // The program to attach to. Must be provided manually. AttachTarget *Program + + // The name of the ELF section this program originated from. + SectionName string + Instructions asm.Instructions // Flags is passed to the kernel and specifies additional program @@ -87,7 +98,7 @@ type ProgramSpec struct { // The BTF associated with this program. Changing Instructions // will most likely invalidate the contained data, and may // result in errors when attempting to load it into the kernel. - BTF *btf.Program + BTF *btf.Spec // The byte order this program was compiled for, may be nil. ByteOrder binary.ByteOrder @@ -112,6 +123,8 @@ func (ps *ProgramSpec) Tag() (string, error) { return ps.Instructions.Tag(internal.NativeEndian) } +type VerifierError = internal.VerifierError + // Program represents BPF program loaded into the kernel. // // It is not safe to close a Program which is used by other goroutines. @@ -120,7 +133,7 @@ type Program struct { // otherwise it is empty. VerifierLog string - fd *internal.FD + fd *sys.FD name string pinnedPath string typ ProgramType } // NewProgram creates a new Program. // -// Loading a program for the first time will perform -// feature detection by loading small, temporary programs. +// See NewProgramWithOptions for details. func NewProgram(spec *ProgramSpec) (*Program, error) { return NewProgramWithOptions(spec, ProgramOptions{}) } @@ -138,12 +150,19 @@ func NewProgram(spec *ProgramSpec) (*Program, error) { // // Loading a program for the first time will perform // feature detection by loading small, temporary programs. +// +// Returns an error wrapping VerifierError if the program or its BTF is rejected +// by the kernel.
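The smallest spec that satisfies the up-front checks added below (non-nil spec, explicit Type) is a do-nothing socket filter. A sketch with a hypothetical name and license, upstream import path assumed:

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	// Return 0 and exit: the minimal verifier-accepted program.
	// Type must be set explicitly; UnspecifiedProgram is rejected.
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Name: "example",
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
		License: "MIT",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()
}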
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if spec == nil { + return nil, errors.New("can't load a program from a nil spec") + } + handles := newHandleCache() defer handles.close() prog, err := newProgramWithOptions(spec, opts, handles) - if errors.Is(err, errUnsatisfiedReference) { + if errors.Is(err, asm.ErrUnsatisfiedMapReference) { return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) } return prog, err @@ -154,6 +173,10 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand return nil, errors.New("instructions cannot be empty") } + if spec.Type == UnspecifiedProgram { + return nil, errors.New("can't load program of unspecified type") + } + if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) } @@ -171,114 +194,85 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand kv = v.Kernel() } - attr := &internal.BPFProgLoadAttr{ - ProgType: uint32(spec.Type), + attr := &sys.ProgLoadAttr{ + ProgType: sys.ProgType(spec.Type), ProgFlags: spec.Flags, - ExpectedAttachType: uint32(spec.AttachType), - License: internal.NewStringPointer(spec.License), - KernelVersion: kv, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, } if haveObjName() == nil { - attr.ProgName = internal.NewBPFObjName(spec.Name) + attr.ProgName = sys.NewObjName(spec.Name) } - var err error - var targetBTF *btf.Spec - if opts.TargetBTF != nil { - targetBTF, err = handles.btfSpec(opts.TargetBTF) - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } - } + kernelTypes := opts.KernelTypes + + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) var btfDisabled bool - var core btf.COREFixups if spec.BTF != nil { - core, err = spec.BTF.Fixups(targetBTF) - if err != nil { - return nil, fmt.Errorf("CO-RE relocations: %w", err) + if err := applyRelocations(insns, spec.BTF, kernelTypes); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) } - handle, err := handles.btfHandle(spec.BTF.Spec()) + handle, err := handles.btfHandle(spec.BTF) btfDisabled = errors.Is(err, btf.ErrNotSupported) if err != nil && !btfDisabled { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { - attr.ProgBTFFd = uint32(handle.FD()) + attr.ProgBtfFd = uint32(handle.FD()) - recSize, bytes, err := spec.BTF.LineInfos() + fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID) if err != nil { - return nil, fmt.Errorf("get BTF line infos: %w", err) + return nil, err } - attr.LineInfoRecSize = recSize - attr.LineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.LineInfo = internal.NewSlicePointer(bytes) - recSize, bytes, err = spec.BTF.FuncInfos() - if err != nil { - return nil, fmt.Errorf("get BTF function infos: %w", err) - } - attr.FuncInfoRecSize = recSize - attr.FuncInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.FuncInfo = internal.NewSlicePointer(bytes) - } - } + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.NewSlicePointer(fib) - insns, err := core.Apply(spec.Instructions) - if err != nil { - return nil, fmt.Errorf("CO-RE fixup: %w", err) + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = 
sys.NewSlicePointer(lib) + } } - if err := fixupJumpsAndCalls(insns); err != nil { + if err := fixupAndValidate(insns); err != nil { return nil, err } - buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) - err = insns.Marshal(buf, internal.NativeEndian) + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + err := insns.Marshal(buf, internal.NativeEndian) if err != nil { return nil, err } bytecode := buf.Bytes() - attr.Instructions = internal.NewSlicePointer(bytecode) - attr.InsCount = uint32(len(bytecode) / asm.InstructionSize) - - if spec.AttachTo != "" { - if spec.AttachTarget != nil { - info, err := spec.AttachTarget.Info() - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } - - btfID, ok := info.BTFID() - if !ok { - return nil, fmt.Errorf("load target BTF: no BTF info available") - } - btfHandle, err := btf.NewHandleFromID(btfID) - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } - defer btfHandle.Close() - - targetBTF = btfHandle.Spec() - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } - } + attr.Insns = sys.NewSlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) - target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType) + if spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) if err != nil { - return nil, err + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) } - if target != nil { - attr.AttachBTFID = uint32(target.ID()) - } - if spec.AttachTarget != nil { - attr.AttachProgFd = uint32(spec.AttachTarget.FD()) + + attr.AttachBtfId = uint32(targetID) + attr.AttachProgFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + targetID, err := findTargetInKernel(kernelTypes, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) } + + attr.AttachBtfId = uint32(targetID) } logSize := DefaultVerifierLogSize @@ -291,37 +285,44 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand logBuf = make([]byte, logSize) attr.LogLevel = opts.LogLevel attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = internal.NewSlicePointer(logBuf) + attr.LogBuf = sys.NewSlicePointer(logBuf) } - fd, err := internal.BPFProgLoad(attr) + fd, err := sys.ProgLoad(attr) if err == nil { - return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil } - logErr := err if opts.LogLevel == 0 && opts.LogSize >= 0 { // Re-run with the verifier enabled to get better error messages. logBuf = make([]byte, logSize) attr.LogLevel = 1 attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = internal.NewSlicePointer(logBuf) + attr.LogBuf = sys.NewSlicePointer(logBuf) + _, _ = sys.ProgLoad(attr) + } - fd, logErr = internal.BPFProgLoad(attr) - if logErr == nil { - fd.Close() + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. 
+ return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) } - } - if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 { - // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can - // check that the log is empty to reduce false positives. - return nil, fmt.Errorf("load program: %w (MEMLOCK bay be too low, consider rlimit.RemoveMemlock)", logErr) + fallthrough + + case errors.Is(err, unix.EINVAL): + if hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } + } } - err = internal.ErrorWithLog(err, logBuf, logErr) + err = internal.ErrorWithLog(err, logBuf) if btfDisabled { - return nil, fmt.Errorf("load program without BTF: %w", err) + return nil, fmt.Errorf("load program: %w (BTF disabled)", err) } return nil, fmt.Errorf("load program: %w", err) } @@ -332,18 +333,21 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand // // Requires at least Linux 4.10. func NewProgramFromFD(fd int) (*Program, error) { - if fd < 0 { - return nil, errors.New("invalid fd") + f, err := sys.NewFD(fd) + if err != nil { + return nil, err } - return newProgramFromFD(internal.NewFD(uint32(fd))) + return newProgramFromFD(f) } // NewProgramFromID returns the program for a given id. // // Returns ErrNotExist, if there is no eBPF program with the given id. func NewProgramFromID(id ProgramID) (*Program, error) { - fd, err := internal.BPFObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, fmt.Errorf("get program by id: %w", err) } @@ -351,7 +355,7 @@ func NewProgramFromID(id ProgramID) (*Program, error) { return newProgramFromFD(fd) } -func newProgramFromFD(fd *internal.FD) (*Program, error) { +func newProgramFromFD(fd *sys.FD) (*Program, error) { info, err := newProgramInfoFromFd(fd) if err != nil { fd.Close() @@ -380,18 +384,29 @@ func (p *Program) Info() (*ProgramInfo, error) { return newProgramInfoFromFd(p.fd) } -// FD gets the file descriptor of the Program. +// Handle returns a reference to the program's type information in the kernel. // -// It is invalid to call this function after Close has been called. -func (p *Program) FD() int { - fd, err := p.fd.Value() +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. +func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) } - return int(fd) + return btf.NewHandleFromID(id) +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. +func (p *Program) FD() int { + return p.fd.Int() } // Clone creates a duplicate of the Program. @@ -445,7 +460,9 @@ func (p *Program) IsPinned() bool { return p.pinnedPath != "" } -// Close unloads the program from the kernel. +// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. 
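The EPERM branch above points callers at rlimit.RemoveMemlock. A minimal sketch of the intended call order, assuming the github.com/cilium/ebpf/rlimit helper package and a *ebpf.ProgramSpec named spec:

// On pre-5.11 kernels BPF objects are charged against RLIMIT_MEMLOCK,
// so lift the limit before the first load; later kernels account BPF
// memory via cgroups instead.
if err := rlimit.RemoveMemlock(); err != nil {
	log.Fatalf("remove memlock limit: %v", err)
}
prog, err := ebpf.NewProgram(spec)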
func (p *Program) Close() error { if p == nil { return nil } @@ -454,6 +471,28 @@ func (p *Program) Close() error { return p.fd.Close() } +// Various options for running a Program +type RunOptions struct { + // Program's data input. Required field. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. + ContextOut interface{} + // Number of times to run Program. Optional field. Defaults to 1. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note that not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + Reset func() +} + // Test runs the Program in the kernel with the given input and returns the // value returned by the eBPF program. outLen may be zero. // @@ -462,11 +501,38 @@ func (p *Program) Close() error { // // This function requires at least Linux 4.12. func (p *Program) Test(in []byte) (uint32, []byte, error) { - ret, out, _, err := p.testRun(in, 1, nil) + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.testRun(&opts) if err != nil { return ret, nil, fmt.Errorf("can't test program: %w", err) } - return ret, out, nil + return ret, opts.DataOut, nil +} + +// Run runs the Program in the kernel with the given RunOptions. +// +// Note: the same restrictions from Test apply. +func (p *Program) Run(opts *RunOptions) (uint32, error) { + ret, _, err := p.testRun(opts) + if err != nil { + return ret, fmt.Errorf("can't test program: %w", err) + } + return ret, nil } // Benchmark runs the Program with the given input for a number of times @@ -481,7 +547,17 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) { // // This function requires at least Linux 4.12. func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { - ret, _, total, err := p.testRun(in, repeat, reset) + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.testRun(&opts) if err != nil { return ret, total, fmt.Errorf("can't benchmark program: %w", err) } @@ -490,6 +566,7 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error { prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels.
Type: SocketFilter, Instructions: asm.Instructions{ asm.LoadImm(asm.R0, 0, asm.DWord), @@ -505,88 +582,109 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e // Programs require at least 14 bytes input in := make([]byte, 14) - attr := bpfProgTestRunAttr{ - fd: uint32(prog.FD()), - dataSizeIn: uint32(len(in)), - dataIn: internal.NewSlicePointer(in), + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.NewSlicePointer(in), } - err = bpfProgTestRun(&attr) - if errors.Is(err, unix.EINVAL) { + err = sys.ProgRun(&attr) + switch { + case errors.Is(err, unix.EINVAL): // Check for EINVAL specifically, rather than err != nil since we // otherwise misdetect due to insufficient permissions. return internal.ErrNotSupported - } - if errors.Is(err, unix.EINTR) { + + case errors.Is(err, unix.EINTR): // We know that PROG_TEST_RUN is supported if we get EINTR. return nil - } - return err -}) -func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) { - if uint(repeat) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("repeat is too high") + case errors.Is(err, unix.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. + return nil } - if len(in) == 0 { - return 0, nil, 0, fmt.Errorf("missing input") - } + return err +}) - if uint(len(in)) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("input is too long") +func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") } if err := haveProgTestRun(); err != nil { - return 0, nil, 0, err + return 0, 0, err } - // Older kernels ignore the dataSizeOut argument when copying to user space. - // Combined with things like bpf_xdp_adjust_head() we don't really know what the final - // size will be. Hence we allocate an output buffer which we hope will always be large - // enough, and panic if the kernel wrote past the end of the allocation. 
- // See https://patchwork.ozlabs.org/cover/1006822/ - out := make([]byte, len(in)+outputPad) + var ctxBytes []byte + if opts.Context != nil { + ctx := new(bytes.Buffer) + if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } + ctxBytes = ctx.Bytes() + } - fd, err := p.fd.Value() - if err != nil { - return 0, nil, 0, err + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) } - attr := bpfProgTestRunAttr{ - fd: fd, - dataSizeIn: uint32(len(in)), - dataSizeOut: uint32(len(out)), - dataIn: internal.NewSlicePointer(in), - dataOut: internal.NewSlicePointer(out), - repeat: uint32(repeat), + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.NewSlicePointer(opts.Data), + DataOut: sys.NewSlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxBytes)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.NewSlicePointer(ctxBytes), + CtxOut: sys.NewSlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, } for { - err = bpfProgTestRun(&attr) + err := sys.ProgRun(&attr) if err == nil { break } if errors.Is(err, unix.EINTR) { - if reset != nil { - reset() + if opts.Reset != nil { + opts.Reset() } continue } - return 0, nil, 0, fmt.Errorf("can't run test: %w", err) + if errors.Is(err, unix.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, fmt.Errorf("can't run test: %w", err) } - if int(attr.dataSizeOut) > cap(out) { - // Houston, we have a problem. The program created more data than we allocated, - // and the kernel wrote past the end of our buffer. - panic("kernel wrote past end of output buffer") + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] } - out = out[:int(attr.dataSizeOut)] - total := time.Duration(attr.duration) * time.Nanosecond - return attr.retval, out, total, nil + if len(ctxOut) != 0 { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } + } + + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, total, nil } func unmarshalProgram(buf []byte) (*Program, error) { @@ -605,70 +703,19 @@ func marshalProgram(p *Program, length int) ([]byte, error) { return nil, fmt.Errorf("can't marshal program to %d bytes", length) } - value, err := p.fd.Value() - if err != nil { - return nil, err - } - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, value) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) return buf, nil } -// Attach a Program. -// -// Deprecated: use link.RawAttachProgram instead. -func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgAttachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: pfd, - AttachType: uint32(typ), - AttachFlags: uint32(flags), - } - - return internal.BPFProgAttach(&attr) -} - -// Detach a Program. 
-// -// Deprecated: use link.RawDetachProgram instead. -func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - if flags != 0 { - return errors.New("flags must be zero") - } - - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgDetachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: pfd, - AttachType: uint32(typ), - } - - return internal.BPFProgDetach(&attr) -} - // LoadPinnedProgram loads a Program from a BPF file. // // Requires at least Linux 4.11. func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, err } @@ -702,28 +749,42 @@ func SanitizeName(name string, replacement rune) string { // // Returns ErrNotExist, if there is no next eBPF program. func ProgramGetNextID(startID ProgramID) (ProgramID, error) { - id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID)) - return ProgramID(id), err + attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} + return ProgramID(attr.NextId), sys.ProgGetNextId(attr) } -// ID returns the systemwide unique ID of the program. +// BindMap binds a map to the program; the map is only released once the program is released. // -// Deprecated: use ProgramInfo.ID() instead. -func (p *Program) ID() (ProgramID, error) { - info, err := bpfGetProgInfoByFD(p.fd, nil) - if err != nil { - return ProgramID(0), err +// This may be used in cases where metadata should be associated with the program +// which otherwise does not contain any references to the map. +func (p *Program) BindMap(m *Map) error { + attr := &sys.ProgBindMapAttr{ + ProgFd: uint32(p.FD()), + MapFd: uint32(m.FD()), } - return ProgramID(info.id), nil + + return sys.ProgBindMap(attr) } -func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { +var errUnrecognizedAttachType = errors.New("unrecognized attach type") + +// find an attach target type in the kernel. +// +// spec may be nil and defaults to the canonical kernel BTF. name together with +// progType and attachType determine which type we need to attach to. +// +// Returns errUnrecognizedAttachType.
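Putting the RunOptions API from above to use: a rough sketch of a single test run that captures program output. xdpProg is a hypothetical *ebpf.Program; DataOut is caller-allocated and padded because, as the comment in Test notes, helpers like bpf_xdp_adjust_head() can grow the packet beyond its input size:

in := make([]byte, 14) // PROG_TEST_RUN requires at least 14 bytes of input
opts := ebpf.RunOptions{
	Data:    in,
	DataOut: make([]byte, len(in)+256), // headroom for grown packets
	Repeat:  1,
}
ret, err := xdpProg.Run(&opts)
if err != nil {
	log.Fatalf("run program: %v", err)
}
// testRun reslices DataOut to the length the kernel reported.
fmt.Printf("return value %d, %d output bytes\n", ret, len(opts.DataOut))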
+func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { type match struct { p ProgramType a AttachType } - var typeName, featureName string + var ( + typeName, featureName string + isBTFTypeFunc = true + ) + switch (match{progType, attachType}) { case match{LSM, AttachLSMMac}: typeName = "bpf_lsm_" + name @@ -731,31 +792,84 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp case match{Tracing, AttachTraceIter}: typeName = "bpf_iter_" + name featureName = name + " iterator" - case match{Extension, AttachNone}: + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + case match{Tracing, AttachModifyReturn}: typeName = name - featureName = fmt.Sprintf("freplace %s", name) + featureName = fmt.Sprintf("fmod_ret %s", name) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + isBTFTypeFunc = false default: - return nil, nil + return 0, errUnrecognizedAttachType } - if spec == nil { - var err error - spec, err = btf.LoadKernelSpec() - if err != nil { - return nil, fmt.Errorf("load kernel spec: %w", err) - } + spec, err := maybeLoadKernelBTF(spec) + if err != nil { + return 0, fmt.Errorf("load kernel spec: %w", err) } - var target *btf.Func - err := spec.FindType(typeName, &target) - if errors.Is(err, btf.ErrNotFound) { - return nil, &internal.UnsupportedFeatureError{ - Name: featureName, + var target btf.Type + if isBTFTypeFunc { + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + target = targetFunc + } else { + var targetTypedef *btf.Typedef + err = spec.TypeByName(typeName, &targetTypedef) + target = targetTypedef + } + + if err != nil { + if errors.Is(err, btf.ErrNotFound) { + return 0, &internal.UnsupportedFeatureError{ + Name: featureName, + } } + return 0, fmt.Errorf("find target for %s: %w", featureName, err) + } + + return spec.TypeID(target) +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. +func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}: + typeName = name + default: + return 0, errUnrecognizedAttachType } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return 0, err + } + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) if err != nil { - return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) + return 0, fmt.Errorf("find target %s: %w", typeName, err) } - return target, nil + return spec.TypeID(targetFunc) } diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh b/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh index a079edc7e18c..c21cca9e5e7d 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/run-tests.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Test the current package under a different kernel. 
# Requires virtme and qemu to be installed. # Examples: @@ -48,21 +48,31 @@ if [[ "${1:-}" = "--exec-vm" ]]; then rm "${output}/fake-stdin" fi - if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ - --rwdir="${testdir}=${testdir}" \ - --rodir=/run/input="${input}" \ - --rwdir=/run/output="${output}" \ - --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ - --kopt possible_cpus=2; then # need at least two CPUs for some tests - exit 23 - fi + for ((i = 0; i < 3; i++)); do + if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ + --rwdir="${testdir}=${testdir}" \ + --rodir=/run/input="${input}" \ + --rwdir=/run/output="${output}" \ + --script-sh "PATH=\"$PATH\" CI_MAX_KERNEL_VERSION="${CI_MAX_KERNEL_VERSION:-}" \"$script\" --exec-test $cmd" \ + --kopt possible_cpus=2; then # need at least two CPUs for some tests + exit 23 + fi + + if [[ -e "${output}/status" ]]; then + break + fi + + if [[ -v CI ]]; then + echo "Retrying test run due to qemu crash" + continue + fi - if [[ ! -e "${output}/success" ]]; then exit 42 - fi + done + rc=$(<"${output}/status") $sudo rm -r "$output" - exit 0 + exit $rc elif [[ "${1:-}" = "--exec-test" ]]; then shift @@ -73,13 +83,16 @@ elif [[ "${1:-}" = "--exec-test" ]]; then export KERNEL_SELFTESTS="/run/input/bpf" fi - dmesg -C - if ! "$@"; then - dmesg - exit 1 # this return code is "swallowed" by qemu + if [[ -f "/run/input/bpf/bpf_testmod/bpf_testmod.ko" ]]; then + insmod "/run/input/bpf/bpf_testmod/bpf_testmod.ko" fi - touch "/run/output/success" - exit 0 + + dmesg --clear + rc=0 + "$@" || rc=$? + dmesg + echo $rc > "/run/output/status" + exit $rc # this return code is "swallowed" by qemu fi readonly kernel_version="${1:-}" @@ -90,22 +103,27 @@ fi shift readonly kernel="linux-${kernel_version}.bz" -readonly selftests="linux-${kernel_version}-selftests-bpf.bz" +readonly selftests="linux-${kernel_version}-selftests-bpf.tgz" readonly input="$(mktemp -d)" readonly tmp_dir="${TMPDIR:-/tmp}" readonly branch="${BRANCH:-master}" fetch() { echo Fetching "${1}" - wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" + pushd "${tmp_dir}" > /dev/null + curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" + local ret=$? + popd > /dev/null + return $ret } fetch "${kernel}" cp "${tmp_dir}/${kernel}" "${input}/bzImage" if fetch "${selftests}"; then + echo "Decompressing selftests" mkdir "${input}/bpf" - tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf" + tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf" else echo "No selftests found, disabling" fi @@ -117,6 +135,8 @@ fi export GOFLAGS=-mod=readonly export CGO_ENABLED=0 +# LINUX_VERSION_CODE test compares this to discovered value. 
+export KERNEL_VERSION="${kernel_version}" echo Testing on "${kernel_version}" go test -exec "$script --exec-vm $input" "${args[@]}" diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go index f8cb5f0e0cd6..e5c270a55851 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/syscalls.go @@ -4,19 +4,13 @@ import ( "bytes" "errors" "fmt" - "os" - "unsafe" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) -// ErrNotExist is returned when loading a non-existing map or program. -// -// Deprecated: use os.ErrNotExist instead. -var ErrNotExist = os.ErrNotExist - // invalidBPFObjNameChar returns true if char may not appear in // a BPF object name. func invalidBPFObjNameChar(char rune) bool { @@ -38,108 +32,24 @@ func invalidBPFObjNameChar(char rune) bool { } } -type bpfMapOpAttr struct { - mapFd uint32 - padding uint32 - key internal.Pointer - value internal.Pointer - flags uint64 -} - -type bpfBatchMapOpAttr struct { - inBatch internal.Pointer - outBatch internal.Pointer - keys internal.Pointer - values internal.Pointer - count uint32 - mapFd uint32 - elemFlags uint64 - flags uint64 -} - -type bpfMapInfo struct { - map_type uint32 // since 4.12 1e2709769086 - id uint32 - key_size uint32 - value_size uint32 - max_entries uint32 - map_flags uint32 - name internal.BPFObjName // since 4.15 ad5b177bd73f - ifindex uint32 // since 4.16 52775b33bb50 - btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6 - netns_dev uint64 // since 4.16 52775b33bb50 - netns_ino uint64 - btf_id uint32 // since 4.18 78958fca7ead - btf_key_type_id uint32 // since 4.18 9b2cf328b2ec - btf_value_type_id uint32 -} - -type bpfProgInfo struct { - prog_type uint32 - id uint32 - tag [unix.BPF_TAG_SIZE]byte - jited_prog_len uint32 - xlated_prog_len uint32 - jited_prog_insns internal.Pointer - xlated_prog_insns internal.Pointer - load_time uint64 // since 4.15 cb4d2b3f03d8 - created_by_uid uint32 - nr_map_ids uint32 // since 4.15 cb4d2b3f03d8 - map_ids internal.Pointer - name internal.BPFObjName // since 4.15 067cae47771c - ifindex uint32 - gpl_compatible uint32 - netns_dev uint64 - netns_ino uint64 - nr_jited_ksyms uint32 - nr_jited_func_lens uint32 - jited_ksyms internal.Pointer - jited_func_lens internal.Pointer - btf_id uint32 - func_info_rec_size uint32 - func_info internal.Pointer - nr_func_info uint32 - nr_line_info uint32 - line_info internal.Pointer - jited_line_info internal.Pointer - nr_jited_line_info uint32 - line_info_rec_size uint32 - jited_line_info_rec_size uint32 - nr_prog_tags uint32 - prog_tags internal.Pointer - run_time_ns uint64 - run_cnt uint64 -} - -type bpfProgTestRunAttr struct { - fd uint32 - retval uint32 - dataSizeIn uint32 - dataSizeOut uint32 - dataIn internal.Pointer - dataOut internal.Pointer - repeat uint32 - duration uint32 -} - -type bpfMapFreezeAttr struct { - mapFd uint32 -} - -type bpfObjGetNextIDAttr struct { - startID uint32 - nextID uint32 - openFlags uint32 -} +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err + } + bytecode := buf.Bytes() -func bpfProgTestRun(attr *bpfProgTestRunAttr) error { - _, 
err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) } var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { - _, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(ArrayOfMaps), + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), KeySize: 4, ValueSize: 4, MaxEntries: 1, @@ -158,12 +68,12 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error { // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_RDONLY_PROG, + MapFlags: unix.BPF_F_RDONLY_PROG, }) if err != nil { return internal.ErrNotSupported @@ -174,12 +84,12 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error { // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_MMAPABLE, + MapFlags: unix.BPF_F_MMAPABLE, }) if err != nil { return internal.ErrNotSupported @@ -190,12 +100,12 @@ var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { // This checks BPF_F_INNER_MAP, which appeared in 5.10. 
- m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_INNER_MAP, + MapFlags: unix.BPF_F_INNER_MAP, }) if err != nil { return internal.ErrNotSupported @@ -204,111 +114,21 @@ var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { return nil }) -func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - flags: flags, - } - _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - } - _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: nextKeyOut, - } - _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) { - attr := bpfObjGetNextIDAttr{ - startID: start, - } - _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return attr.nextID, err -} - -func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) { - fd, err := m.Value() +var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) if err != nil { - return 0, err - } - - attr := bpfBatchMapOpAttr{ - inBatch: inBatch, - outBatch: outBatch, - keys: keys, - values: values, - count: count, - mapFd: fd, - } - if opts != nil { - attr.elemFlags = opts.ElemFlags - attr.flags = opts.Flags + return internal.ErrNotSupported } - _, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - // always return count even on an error, as things like update might partially be fulfilled. 
- return attr.count, wrapMapError(err) -} + _ = m.Close() + return nil +}) func wrapMapError(err error) error { if err == nil { @@ -316,15 +136,15 @@ func wrapMapError(err error) error { } if errors.Is(err, unix.ENOENT) { - return internal.SyscallError(ErrKeyNotExist, unix.ENOENT) + return sys.Error(ErrKeyNotExist, unix.ENOENT) } if errors.Is(err, unix.EEXIST) { - return internal.SyscallError(ErrKeyExist, unix.EEXIST) + return sys.Error(ErrKeyExist, unix.EEXIST) } if errors.Is(err, unix.ENOTSUPP) { - return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP) + return sys.Error(ErrNotSupported, unix.ENOTSUPP) } if errors.Is(err, unix.E2BIG) { @@ -334,51 +154,16 @@ func wrapMapError(err error) error { return err } -func bpfMapFreeze(m *internal.FD) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapFreezeAttr{ - mapFd: fd, - } - _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return err -} - -func bpfGetProgInfoByFD(fd *internal.FD, ids []MapID) (*bpfProgInfo, error) { - var info bpfProgInfo - if len(ids) > 0 { - info.nr_map_ids = uint32(len(ids)) - info.map_ids = internal.NewPointer(unsafe.Pointer(&ids[0])) - } - - if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { - return nil, fmt.Errorf("can't get program info: %w", err) - } - return &info, nil -} - -func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) { - var info bpfMapInfo - err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { - return nil, fmt.Errorf("can't get map info: %w", err) - } - return &info, nil -} - var haveObjName = internal.FeatureTest("object names", "4.15", func() error { - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapName: internal.NewBPFObjName("feature_test"), + MapName: sys.NewObjName("feature_test"), } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } @@ -392,15 +177,15 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() return err } - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapName: internal.NewBPFObjName(".test"), + MapName: sys.NewObjName(".test"), } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } @@ -411,24 +196,30 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { var maxEntries uint32 = 2 - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Hash), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), KeySize: 4, ValueSize: 4, MaxEntries: maxEntries, } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } defer fd.Close() + keys := []uint32{1, 2} values := []uint32{3, 4} kp, _ := marshalPtr(keys, 8) vp, _ := marshalPtr(values, 8) - nilPtr := internal.NewPointer(nil) - _, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + Keys: kp, + Values: vp, + Count: maxEntries, + }) if err != nil { return 
internal.ErrNotSupported } @@ -444,21 +235,30 @@ var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", f asm.FnProbeReadKernel.Call(), asm.Return(), } - buf := bytes.NewBuffer(make([]byte, 0, len(insns)*asm.InstructionSize)) - if err := insns.Marshal(buf, internal.NativeEndian); err != nil { - return err - } - bytecode := buf.Bytes() - fd, err := internal.BPFProgLoad(&internal.BPFProgLoadAttr{ - ProgType: uint32(Kprobe), - License: internal.NewStringPointer("GPL"), - Instructions: internal.NewSlicePointer(bytecode), - InsCount: uint32(len(bytecode) / asm.InstructionSize), - }) + fd, err := progLoad(insns, Kprobe, "GPL") if err != nil { return internal.ErrNotSupported } _ = fd.Close() return nil }) + +var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + _ = fd.Close() + return nil +}) diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go index 84b83f9f9855..a27b44247454 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types.go @@ -11,7 +11,7 @@ import ( type MapType uint32 // Max returns the latest supported MapType. -func (_ MapType) Max() MapType { +func (MapType) Max() MapType { return maxMapType - 1 } @@ -103,12 +103,6 @@ const ( maxMapType ) -// Deprecated: StructOpts was a typo, use StructOpsMap instead. -// -// Declared as a variable to prevent stringer from picking it up -// as an enum value. -var StructOpts MapType = StructOpsMap - // hasPerCPUValue returns true if the Map stores a value per CPU. func (mt MapType) hasPerCPUValue() bool { return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage @@ -126,11 +120,22 @@ func (mt MapType) canStoreProgram() bool { return mt == ProgramArray } +// hasBTF returns true if the map type supports BTF key/value metadata. +func (mt MapType) hasBTF() bool { + switch mt { + case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap, + DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf: + return false + default: + return true + } +} + // ProgramType of the eBPF program type ProgramType uint32 // Max return the latest supported ProgramType. 
-func (_ ProgramType) Max() ProgramType { +func (ProgramType) Max() ProgramType { return maxProgramType - 1 } @@ -167,6 +172,7 @@ const ( Extension LSM SkLookup + Syscall maxProgramType ) diff --git a/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go index 81cbc9efde0c..e80b948b0960 100644 --- a/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go +++ b/cluster-autoscaler/vendor/github.com/cilium/ebpf/types_string.go @@ -86,12 +86,13 @@ func _() { _ = x[Extension-28] _ = x[LSM-29] _ = x[SkLookup-30] - _ = x[maxProgramType-31] + _ = x[Syscall-31] + _ = x[maxProgramType-32] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupmaxProgramType" +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType" -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 308} +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315} func (i ProgramType) String() string { if i >= ProgramType(len(_ProgramType_index)-1) { diff --git a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go index d889edb2c8c0..fa010c376b77 100644 --- a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go +++ b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -47,18 +47,28 @@ const ( // returned by NodeGetInfo to ensure that a given volume is // accessible from a given node when scheduling workloads. PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 + // GROUP_CONTROLLER_SERVICE indicates that the Plugin provides + // RPCs for operating on groups of volumes. Plugins MAY provide + // this capability. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED GroupController service RPCs, as + // well as specific RPCs as indicated by + // GroupControllerGetCapabilities. + PluginCapability_Service_GROUP_CONTROLLER_SERVICE PluginCapability_Service_Type = 3 ) var PluginCapability_Service_Type_name = map[int32]string{ 0: "UNKNOWN", 1: "CONTROLLER_SERVICE", 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", + 3: "GROUP_CONTROLLER_SERVICE", } var PluginCapability_Service_Type_value = map[string]int32{ "UNKNOWN": 0, "CONTROLLER_SERVICE": 1, "VOLUME_ACCESSIBILITY_CONSTRAINTS": 2, + "GROUP_CONTROLLER_SERVICE": 3, } func (x PluginCapability_Service_Type) String() string { @@ -85,17 +95,20 @@ const ( // expansion of node-published volume via NodeExpandVolume. 
// // Example 1: Given a shared filesystem volume (e.g. GlusterFs), - // the Plugin may set the ONLINE volume expansion capability and - // implement ControllerExpandVolume but not NodeExpandVolume. + // + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. // // Example 2: Given a block storage volume type (e.g. EBS), the - // Plugin may set the ONLINE volume expansion capability and - // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. // // Example 3: Given a Plugin that supports volume expansion only - // upon a node, the Plugin may set the ONLINE volume - // expansion capability and implement NodeExpandVolume but not - // ControllerExpandVolume. + // + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. PluginCapability_VolumeExpansion_ONLINE PluginCapability_VolumeExpansion_Type = 1 // OFFLINE indicates that volumes currently published and // available on a node SHALL NOT be expanded via @@ -105,10 +118,11 @@ const ( // the EXPAND_VOLUME node capability. // // Example 1: Given a block storage volume type (e.g. Azure Disk) - // that does not support expansion of "node-attached" (i.e. - // controller-published) volumes, the Plugin may indicate - // OFFLINE volume expansion support and implement both - // ControllerExpandVolume and NodeExpandVolume. + // + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2 ) @@ -385,6 +399,34 @@ func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor_9cdb00adce470e01, []int{55, 0, 0} } +type GroupControllerServiceCapability_RPC_Type int32 + +const ( + GroupControllerServiceCapability_RPC_UNKNOWN GroupControllerServiceCapability_RPC_Type = 0 + // Indicates that the group controller plugin supports + // creating, deleting, and getting details of a volume + // group snapshot. + GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT GroupControllerServiceCapability_RPC_Type = 1 +) + +var GroupControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT", +} + +var GroupControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT": 1, +} + +func (x GroupControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(GroupControllerServiceCapability_RPC_Type_name, int32(x)) +} + +func (GroupControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{62, 0, 0} +} + type GetPluginInfoRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -555,6 +597,7 @@ func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { // Specifies a capability of the plugin. 
type PluginCapability struct { // Types that are valid to be assigned to Type: + // // *PluginCapability_Service_ // *PluginCapability_VolumeExpansion_ Type isPluginCapability_Type `protobuf_oneof:"type"` @@ -748,16 +791,16 @@ type ProbeResponse struct { // and it is important for a CO to distinguish between the following // cases: // - // 1) The plugin is in an unhealthy state and MAY need restarting. In - // this case a gRPC error code SHALL be returned. - // 2) The plugin is still initializing, but is otherwise perfectly - // healthy. In this case a successful response SHALL be returned - // with a readiness value of `false`. Calls to the plugin's - // Controller and/or Node services MAY fail due to an incomplete - // initialization state. - // 3) The plugin has finished initializing and is ready to service - // calls to its Controller and/or Node services. A successful - // response is returned with a readiness value of `true`. + // 1. The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2. The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3. The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. // // This field is OPTIONAL. If not present, the caller SHALL assume // that the plugin is in a ready state and is accepting calls to its @@ -804,26 +847,27 @@ func (m *ProbeResponse) GetReady() *wrappers.BoolValue { type CreateVolumeRequest struct { // The suggested name for the storage space. This field is REQUIRED. // It serves two purposes: - // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. The Plugin SHOULD ensure that multiple - // `CreateVolume` calls for the same name do not result in more - // than one piece of storage provisioned corresponding to that - // name. If a Plugin is unable to enforce idempotency, the CO's - // error recovery logic could result in multiple (unused) volumes - // being provisioned. - // In the case of error, the CO MUST handle the gRPC error codes - // per the recovery behavior defined in the "CreateVolume Errors" - // section below. - // The CO is responsible for cleaning up volumes it provisioned - // that it no longer needs. If the CO is uncertain whether a volume - // was provisioned or not when a `CreateVolume` call fails, the CO - // MAY call `CreateVolume` again, with the same name, to ensure the - // volume exists and to retrieve the volume's `volume_id` (unless - // otherwise prohibited by "CreateVolume Errors"). - // 2) Suggested name - Some storage systems allow callers to specify - // an identifier by which to refer to the newly provisioned - // storage. If a storage system supports this, it can optionally - // use this name as the identifier for the new volume. + // 1. Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. 
+ // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2. Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // // Any Unicode string that conforms to the length limit is allowed // except those containing the following banned characters: // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. @@ -957,6 +1001,7 @@ func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequiremen // type fields MUST be specified. type VolumeContentSource struct { // Types that are valid to be assigned to Type: + // // *VolumeContentSource_Snapshot // *VolumeContentSource_Volume Type isVolumeContentSource_Type `protobuf_oneof:"type"` @@ -1168,6 +1213,7 @@ type VolumeCapability struct { // following fields MUST be specified. // // Types that are valid to be assigned to AccessType: + // // *VolumeCapability_Block // *VolumeCapability_Mount AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` @@ -1509,14 +1555,18 @@ type Volume struct { // node. // // Example 1: - // accessible_topology = {"region": "R1", "zone": "Z2"} + // + // accessible_topology = {"region": "R1", "zone": "Z2"} + // // Indicates a volume accessible only from the "region" "R1" and the // "zone" "Z2". // // Example 2: - // accessible_topology = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} + // + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" // in the "region" "R1". AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` @@ -1595,21 +1645,27 @@ type TopologyRequirement struct { // accessible from at least one of the requisite topologies. // // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies + // + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 // If x==n, then the SP MUST make the provisioned volume available to // all topologies from the list of requisite topologies. If it is // unable to do so, the SP MUST fail the CreateVolume call. // For example, if a volume should be accessible from a single zone, // and requisite = - // {"region": "R1", "zone": "Z2"} + // + // {"region": "R1", "zone": "Z2"} + // // then the provisioned volume MUST be accessible from the "region" // "R1" and the "zone" "Z2". 
// Similarly, if a volume should be accessible from two zones, and // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} + // + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // // then the provisioned volume MUST be accessible from the "region" // "R1" and both "zone" "Z2" and "zone" "Z3". // @@ -1618,18 +1674,23 @@ type TopologyRequirement struct { // the CreateVolume call. // For example, if a volume should be accessible from a single zone, // and requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} + // + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // // then the SP may choose to make the provisioned volume available in // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1". // Similarly, if a volume should be accessible from two zones, and // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"} + // + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"} + // // then the provisioned volume MUST be accessible from any combination // of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and - // "R1/Z4", or "R1/Z3" and "R1/Z4". + // + // "R1/Z4", or "R1/Z3" and "R1/Z4". // // If x>n, then the SP MUST make the provisioned volume available from // all topologies from the list of requisite topologies and MAY choose @@ -1638,7 +1699,9 @@ type TopologyRequirement struct { // CreateVolume call. // For example, if a volume should be accessible from two zones, and // requisite = - // {"region": "R1", "zone": "Z2"} + // + // {"region": "R1", "zone": "Z2"} + // // then the provisioned volume MUST be accessible from the "region" // "R1" and the "zone" "Z2" and the SP may select the second zone // independently, e.g. "R1/Z4". @@ -1667,10 +1730,14 @@ type TopologyRequirement struct { // Example 1: // Given a volume should be accessible from a single zone, and // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} + // + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // // preferred = - // {"region": "R1", "zone": "Z3"} + // + // {"region": "R1", "zone": "Z3"} + // // then the SP SHOULD first attempt to make the provisioned volume // available from "zone" "Z3" in the "region" "R1" and fall back to // "zone" "Z2" in the "region" "R1" if that is not possible. @@ -1678,13 +1745,17 @@ type TopologyRequirement struct { // Example 2: // Given a volume should be accessible from a single zone, and // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} + // + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // // preferred = - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z2"} + // + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // // then the SP SHOULD first attempt to make the provisioned volume // accessible from "zone" "Z4" in the "region" "R1" and fall back to // "zone" "Z2" in the "region" "R1" if that is not possible. 
If that @@ -1697,13 +1768,17 @@ type TopologyRequirement struct { // the volume is accessible from two zones, aka synchronously // replicated), and // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} + // + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} + // + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // // then the SP SHOULD first attempt to make the provisioned volume // accessible from the combination of the two "zones" "Z5" and "Z3" in // the "region" "R1". If that's not possible, it should fall back to @@ -2972,6 +3047,7 @@ func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServi // Specifies a capability of the controller service. type ControllerServiceCapability struct { // Types that are valid to be assigned to Type: + // // *ControllerServiceCapability_Rpc Type isControllerServiceCapability_Type `protobuf_oneof:"type"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -3093,12 +3169,12 @@ type CreateSnapshotRequest struct { // This field is OPTIONAL. The Plugin is responsible for parsing and // validating these parameters. COs will treat these as opaque. // Use cases for opaque parameters: - // - Specify a policy to automatically clean up the snapshot. - // - Specify an expiration date for the snapshot. - // - Specify whether the snapshot is readonly or read/write. - // - Specify if the snapshot should be replicated to some place. - // - Specify primary or secondary for replication systems that - // support snapshotting only on primary. + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -3230,7 +3306,21 @@ type Snapshot struct { // Indicates if a snapshot is ready to use as a // `volume_content_source` in a `CreateVolumeRequest`. The default // value is false. This field is REQUIRED. - ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` + ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` + // The ID of the volume group snapshot that this snapshot is part of. + // It uniquely identifies the group snapshot on the storage system. + // This field is OPTIONAL. + // If this snapshot is a member of a volume group snapshot, and it + // MUST NOT be deleted as a stand alone snapshot, then the SP + // MUST provide the ID of the volume group snapshot in this field. + // If provided, CO MUST use this field in subsequent volume group + // snapshot operations to indicate that this snapshot is part of the + // specified group snapshot. + // If not provided, CO SHALL treat the snapshot as independent, + // and SP SHALL allow it to be deleted separately. 
@@ -3296,6 +3386,13 @@ func (m *Snapshot) GetReadyToUse() bool {
 	return false
 }
 
+func (m *Snapshot) GetGroupSnapshotId() string {
+	if m != nil {
+		return m.GroupSnapshotId
+	}
+	return ""
+}
+
 type DeleteSnapshotRequest struct {
 	// The ID of the snapshot to be deleted.
 	// This field is REQUIRED.
@@ -4500,6 +4597,7 @@ func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability
 // Specifies a capability of the node service.
 type NodeServiceCapability struct {
 	// Types that are valid to be assigned to Type:
+	//
 	//	*NodeServiceCapability_Rpc
 	Type isNodeServiceCapability_Type `protobuf_oneof:"type"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -4666,8 +4764,10 @@ type NodeGetInfoResponse struct {
 	// no topological constraints declared for V.
 	//
 	// Example 1:
-	//   accessible_topology =
-	//     {"region": "R1", "zone": "Z2"}
+	//
+	//	accessible_topology =
+	//	  {"region": "R1", "zone": "Z2"}
+	//
 	// Indicates the node exists within the "region" "R1" and the "zone"
 	// "Z2".
 	AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
@@ -4874,6 +4974,608 @@ func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 {
 	return 0
 }
 
+type GroupControllerGetCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerGetCapabilitiesRequest) Reset() { *m = GroupControllerGetCapabilitiesRequest{} }
+func (m *GroupControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerGetCapabilitiesRequest) ProtoMessage() {}
+func (*GroupControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{60}
+}
+
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Merge(m, src)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Size(m)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupControllerGetCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerGetCapabilitiesRequest proto.InternalMessageInfo
+
+type GroupControllerGetCapabilitiesResponse struct {
+	// All the capabilities that the group controller service supports.
+	// This field is OPTIONAL.
+	Capabilities []*GroupControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerGetCapabilitiesResponse) Reset() {
+	*m = GroupControllerGetCapabilitiesResponse{}
+}
+func (m *GroupControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerGetCapabilitiesResponse) ProtoMessage() {}
+func (*GroupControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{61}
+}
+
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Size(m)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupControllerGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *GroupControllerGetCapabilitiesResponse) GetCapabilities() []*GroupControllerServiceCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the group controller service.
+type GroupControllerServiceCapability struct {
+	// Types that are valid to be assigned to Type:
+	//
+	//	*GroupControllerServiceCapability_Rpc
+	Type isGroupControllerServiceCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerServiceCapability) Reset() { *m = GroupControllerServiceCapability{} }
+func (m *GroupControllerServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerServiceCapability) ProtoMessage() {}
+func (*GroupControllerServiceCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{62}
+}
+
+func (m *GroupControllerServiceCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GroupControllerServiceCapability.Unmarshal(m, b)
+}
+func (m *GroupControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GroupControllerServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerServiceCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupControllerServiceCapability.Merge(m, src)
+}
+func (m *GroupControllerServiceCapability) XXX_Size() int {
+	return xxx_messageInfo_GroupControllerServiceCapability.Size(m)
+}
+func (m *GroupControllerServiceCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupControllerServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerServiceCapability proto.InternalMessageInfo
+
+type isGroupControllerServiceCapability_Type interface {
+	isGroupControllerServiceCapability_Type()
+}
+
+type GroupControllerServiceCapability_Rpc struct {
+	Rpc *GroupControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*GroupControllerServiceCapability_Rpc) isGroupControllerServiceCapability_Type() {}
+
+func (m *GroupControllerServiceCapability) GetType() isGroupControllerServiceCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *GroupControllerServiceCapability) GetRpc() *GroupControllerServiceCapability_RPC {
+	if x, ok := m.GetType().(*GroupControllerServiceCapability_Rpc); ok {
+		return x.Rpc
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*GroupControllerServiceCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*GroupControllerServiceCapability_Rpc)(nil),
+	}
+}
+
+type GroupControllerServiceCapability_RPC struct {
+	Type GroupControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.GroupControllerServiceCapability_RPC_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerServiceCapability_RPC) Reset() { *m = GroupControllerServiceCapability_RPC{} }
+func (m *GroupControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerServiceCapability_RPC) ProtoMessage() {}
+func (*GroupControllerServiceCapability_RPC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{62, 0}
+}
+
+func (m *GroupControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GroupControllerServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GroupControllerServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupControllerServiceCapability_RPC.Merge(m, src)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_Size() int {
+	return xxx_messageInfo_GroupControllerServiceCapability_RPC.Size(m)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupControllerServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *GroupControllerServiceCapability_RPC) GetType() GroupControllerServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return GroupControllerServiceCapability_RPC_UNKNOWN
+}
+
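A CO probes these capabilities before issuing any group snapshot RPC. A minimal sketch (illustrative, not part of the vendored file; it assumes the canonical csi import path and uses only the GetCapabilities, GetRpc, and GetType accessors defined in this diff):

package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// hasGroupControllerCapability scans a GroupControllerGetCapabilities
// response for a given RPC capability type.
func hasGroupControllerCapability(
	resp *csi.GroupControllerGetCapabilitiesResponse,
	want csi.GroupControllerServiceCapability_RPC_Type,
) bool {
	for _, c := range resp.GetCapabilities() {
		// Each capability is a oneof; GetRpc returns nil unless the
		// RPC variant is set.
		if rpc := c.GetRpc(); rpc != nil && rpc.GetType() == want {
			return true
		}
	}
	return false
}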
+type CreateVolumeGroupSnapshotRequest struct {
+	// The suggested name for the group snapshot. This field is REQUIRED
+	// for idempotency.
+	// Any Unicode string that conforms to the length limit is allowed
+	// except those containing the following banned characters:
+	// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+	// (These are control characters other than commonly used whitespace.)
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// volume IDs of the source volumes to be snapshotted together.
+	// This field is REQUIRED.
+	SourceVolumeIds []string `protobuf:"bytes,2,rep,name=source_volume_ids,json=sourceVolumeIds,proto3" json:"source_volume_ids,omitempty"`
+	// Secrets required by plugin to complete
+	// ControllerCreateVolumeGroupSnapshot request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	// The secrets provided in this field SHOULD be the same for
+	// all group snapshot operations on the same group snapshot.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Plugin specific parameters passed in as opaque key-value pairs.
+	// This field is OPTIONAL. The Plugin is responsible for parsing and
+	// validating these parameters. COs will treat these as opaque.
+	Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) Reset() { *m = CreateVolumeGroupSnapshotRequest{} }
+func (m *CreateVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeGroupSnapshotRequest) ProtoMessage() {}
+func (*CreateVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{63}
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Unmarshal(m, b)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Merge(m, src)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Size(m)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateVolumeGroupSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeGroupSnapshotRequest proto.InternalMessageInfo
+
+func (m *CreateVolumeGroupSnapshotRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) GetSourceVolumeIds() []string {
+	if m != nil {
+		return m.SourceVolumeIds
+	}
+	return nil
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
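Assembling such a request is straightforward; the one subtlety is that Name is the idempotency key and must stay stable across retries of the same logical group snapshot. A sketch (illustrative only; the "retention" parameter is a made-up, plugin-specific key, and the csi import path is assumed):

package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// newGroupSnapshotRequest assembles a CreateVolumeGroupSnapshotRequest
// for a set of volumes to be snapshotted together.
func newGroupSnapshotRequest(name string, volumeIDs []string) *csi.CreateVolumeGroupSnapshotRequest {
	return &csi.CreateVolumeGroupSnapshotRequest{
		Name:            name,      // REQUIRED: idempotency key, stable across retries
		SourceVolumeIds: volumeIDs, // REQUIRED: volumes snapshotted together
		// Secrets and Parameters are OPTIONAL and opaque to the CO;
		// "retention" here is a hypothetical plugin-specific key.
		Parameters: map[string]string{"retention": "7d"},
	}
}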
+type CreateVolumeGroupSnapshotResponse struct {
+	// Contains all attributes of the newly created group snapshot.
+	// This field is REQUIRED.
+	GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateVolumeGroupSnapshotResponse) Reset() { *m = CreateVolumeGroupSnapshotResponse{} }
+func (m *CreateVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeGroupSnapshotResponse) ProtoMessage() {}
+func (*CreateVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{64}
+}
+
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Unmarshal(m, b)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Merge(m, src)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Size(m)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateVolumeGroupSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeGroupSnapshotResponse proto.InternalMessageInfo
+
+func (m *CreateVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot {
+	if m != nil {
+		return m.GroupSnapshot
+	}
+	return nil
+}
+
+type VolumeGroupSnapshot struct {
+	// The identifier for this group snapshot, generated by the plugin.
+	// This field MUST contain enough information to uniquely identify
+	// this specific snapshot vs all other group snapshots supported by
+	// this plugin.
+	// This field SHALL be used by the CO in subsequent calls to refer to
+	// this group snapshot.
+	// The SP is NOT responsible for global uniqueness of
+	// group_snapshot_id across multiple SPs.
+	// This field is REQUIRED.
+	GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
+	// A list of snapshots belonging to this group.
+	// This field is REQUIRED.
+	Snapshots []*Snapshot `protobuf:"bytes,2,rep,name=snapshots,proto3" json:"snapshots,omitempty"`
+	// Timestamp of when the volume group snapshot was taken.
+	// This field is REQUIRED.
+	CreationTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
+	// Indicates if all individual snapshots in the group snapshot
+	// are ready to use as a `volume_content_source` in a
+	// `CreateVolumeRequest`. The default value is false.
+	// If any snapshot in the list of snapshots in this message have
+	// ready_to_use set to false, the SP MUST set this field to false.
+	// If all of the snapshots in the list of snapshots in this message
+	// have ready_to_use set to true, the SP SHOULD set this field to
+	// true.
+	// This field is REQUIRED.
+	ReadyToUse bool `protobuf:"varint,4,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VolumeGroupSnapshot) Reset() { *m = VolumeGroupSnapshot{} }
+func (m *VolumeGroupSnapshot) String() string { return proto.CompactTextString(m) }
+func (*VolumeGroupSnapshot) ProtoMessage() {}
+func (*VolumeGroupSnapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{65}
+}
+
+func (m *VolumeGroupSnapshot) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeGroupSnapshot.Unmarshal(m, b)
+}
+func (m *VolumeGroupSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeGroupSnapshot.Marshal(b, m, deterministic)
+}
+func (m *VolumeGroupSnapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeGroupSnapshot.Merge(m, src)
+}
+func (m *VolumeGroupSnapshot) XXX_Size() int {
+	return xxx_messageInfo_VolumeGroupSnapshot.Size(m)
+}
+func (m *VolumeGroupSnapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeGroupSnapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeGroupSnapshot proto.InternalMessageInfo
+
+func (m *VolumeGroupSnapshot) GetGroupSnapshotId() string {
+	if m != nil {
+		return m.GroupSnapshotId
+	}
+	return ""
+}
+
+func (m *VolumeGroupSnapshot) GetSnapshots() []*Snapshot {
+	if m != nil {
+		return m.Snapshots
+	}
+	return nil
+}
+
+func (m *VolumeGroupSnapshot) GetCreationTime() *timestamp.Timestamp {
+	if m != nil {
+		return m.CreationTime
+	}
+	return nil
+}
+
+func (m *VolumeGroupSnapshot) GetReadyToUse() bool {
+	if m != nil {
+		return m.ReadyToUse
+	}
+	return false
+}
+
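The ready_to_use aggregation rule documented above is a simple conjunction over the member snapshots. A sketch of how an SP could derive it when filling in the message (illustrative only; uses just the GetSnapshots and GetReadyToUse accessors from this file, with the csi import path assumed):

package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// groupReadyToUse re-derives the rule on VolumeGroupSnapshot.ReadyToUse:
// the group is ready only if every member snapshot is ready.
func groupReadyToUse(gs *csi.VolumeGroupSnapshot) bool {
	for _, s := range gs.GetSnapshots() {
		if !s.GetReadyToUse() {
			return false // one not-ready member forces false
		}
	}
	return true
}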
+type DeleteVolumeGroupSnapshotRequest struct {
+	// The ID of the group snapshot to be deleted.
+	// This field is REQUIRED.
+	GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
+	// A list of snapshot IDs that are part of this group snapshot.
+	// If SP does not need to rely on this field to delete the snapshots
+	// in the group, it SHOULD check this field and report an error
+	// if it has the ability to detect a mismatch.
+	// Some SPs require this list to delete the snapshots in the group.
+	// If SP needs to use this field to delete the snapshots in the
+	// group, it MUST report an error if it has the ability to detect
+	// a mismatch.
+	// This field is REQUIRED.
+	SnapshotIds []string `protobuf:"bytes,2,rep,name=snapshot_ids,json=snapshotIds,proto3" json:"snapshot_ids,omitempty"`
+	// Secrets required by plugin to complete group snapshot deletion
+	// request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	// The secrets provided in this field SHOULD be the same for
+	// all group snapshot operations on the same group snapshot.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) Reset() { *m = DeleteVolumeGroupSnapshotRequest{} }
+func (m *DeleteVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeGroupSnapshotRequest) ProtoMessage() {}
+func (*DeleteVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{66}
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Unmarshal(m, b)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Merge(m, src)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Size(m)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeGroupSnapshotRequest proto.InternalMessageInfo
+
+func (m *DeleteVolumeGroupSnapshotRequest) GetGroupSnapshotId() string {
+	if m != nil {
+		return m.GroupSnapshotId
+	}
+	return ""
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) GetSnapshotIds() []string {
+	if m != nil {
+		return m.SnapshotIds
+	}
+	return nil
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type DeleteVolumeGroupSnapshotResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteVolumeGroupSnapshotResponse) Reset() { *m = DeleteVolumeGroupSnapshotResponse{} }
+func (m *DeleteVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeGroupSnapshotResponse) ProtoMessage() {}
+func (*DeleteVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{67}
+}
+
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Unmarshal(m, b)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Merge(m, src)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Size(m)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeGroupSnapshotResponse proto.InternalMessageInfo
+
+type GetVolumeGroupSnapshotRequest struct {
+	// The ID of the group snapshot to fetch current group snapshot
+	// information for.
+	// This field is REQUIRED.
+	GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
+	// A list of snapshot IDs that are part of this group snapshot.
+	// If SP does not need to rely on this field to get the snapshots
+	// in the group, it SHOULD check this field and report an error
+	// if it has the ability to detect a mismatch.
+	// Some SPs require this list to get the snapshots in the group.
+	// If SP needs to use this field to get the snapshots in the
+	// group, it MUST report an error if it has the ability to detect
+	// a mismatch.
+	// This field is REQUIRED.
+	SnapshotIds []string `protobuf:"bytes,2,rep,name=snapshot_ids,json=snapshotIds,proto3" json:"snapshot_ids,omitempty"`
+	// Secrets required by plugin to complete
+	// GetVolumeGroupSnapshot request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	// The secrets provided in this field SHOULD be the same for
+	// all group snapshot operations on the same group snapshot.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetVolumeGroupSnapshotRequest) Reset() { *m = GetVolumeGroupSnapshotRequest{} }
+func (m *GetVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVolumeGroupSnapshotRequest) ProtoMessage() {}
+func (*GetVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{68}
+}
+
+func (m *GetVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Unmarshal(m, b)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetVolumeGroupSnapshotRequest.Merge(m, src)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Size(m)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetVolumeGroupSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVolumeGroupSnapshotRequest proto.InternalMessageInfo
+
+func (m *GetVolumeGroupSnapshotRequest) GetGroupSnapshotId() string {
+	if m != nil {
+		return m.GroupSnapshotId
+	}
+	return ""
+}
+
+func (m *GetVolumeGroupSnapshotRequest) GetSnapshotIds() []string {
+	if m != nil {
+		return m.SnapshotIds
+	}
+	return nil
+}
+
+func (m *GetVolumeGroupSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
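GetVolumeGroupSnapshot is the call a CO would poll while waiting for a group snapshot to become usable. A sketch of such a loop (illustrative only; it assumes the csi import path and that GroupControllerClient is the gRPC client generated for the GroupController service further down in this file):

package example

import (
	"context"
	"time"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// waitForGroupSnapshot polls GetVolumeGroupSnapshot until the group
// snapshot reports ready_to_use or ctx expires.
func waitForGroupSnapshot(ctx context.Context, gc csi.GroupControllerClient, id string, snapshotIDs []string) (*csi.VolumeGroupSnapshot, error) {
	for {
		resp, err := gc.GetVolumeGroupSnapshot(ctx, &csi.GetVolumeGroupSnapshotRequest{
			GroupSnapshotId: id,          // REQUIRED
			SnapshotIds:     snapshotIDs, // REQUIRED: lets the SP detect a mismatch
		})
		if err != nil {
			return nil, err
		}
		// Generated getters are nil-safe, so this is fine even if the
		// SP omitted the (REQUIRED) group_snapshot field.
		if gs := resp.GetGroupSnapshot(); gs.GetReadyToUse() {
			return gs, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}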
+type GetVolumeGroupSnapshotResponse struct {
+	// This field is REQUIRED
+	GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetVolumeGroupSnapshotResponse) Reset() { *m = GetVolumeGroupSnapshotResponse{} }
+func (m *GetVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVolumeGroupSnapshotResponse) ProtoMessage() {}
+func (*GetVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{69}
+}
+
+func (m *GetVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Unmarshal(m, b)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetVolumeGroupSnapshotResponse.Merge(m, src)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Size(m)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetVolumeGroupSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVolumeGroupSnapshotResponse proto.InternalMessageInfo
+
+func (m *GetVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot {
+	if m != nil {
+		return m.GroupSnapshot
+	}
+	return nil
+}
+
 var E_AlphaEnum = &proto.ExtensionDesc{
 	ExtendedType:  (*descriptor.EnumOptions)(nil),
 	ExtensionType: (*bool)(nil),
@@ -4944,6 +5646,7 @@ func init() {
 	proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value)
 	proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value)
 	proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value)
+	proto.RegisterEnum("csi.v1.GroupControllerServiceCapability_RPC_Type", GroupControllerServiceCapability_RPC_Type_name, GroupControllerServiceCapability_RPC_Type_value)
 	proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest")
 	proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse")
 	proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry")
@@ -5046,6 +5749,21 @@ func init() {
 	proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest")
 	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeExpandVolumeRequest.SecretsEntry")
 	proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse")
+	proto.RegisterType((*GroupControllerGetCapabilitiesRequest)(nil), "csi.v1.GroupControllerGetCapabilitiesRequest")
+	proto.RegisterType((*GroupControllerGetCapabilitiesResponse)(nil), "csi.v1.GroupControllerGetCapabilitiesResponse")
+	proto.RegisterType((*GroupControllerServiceCapability)(nil), "csi.v1.GroupControllerServiceCapability")
+	proto.RegisterType((*GroupControllerServiceCapability_RPC)(nil), "csi.v1.GroupControllerServiceCapability.RPC")
+	proto.RegisterType((*CreateVolumeGroupSnapshotRequest)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*CreateVolumeGroupSnapshotResponse)(nil), "csi.v1.CreateVolumeGroupSnapshotResponse")
+	proto.RegisterType((*VolumeGroupSnapshot)(nil), "csi.v1.VolumeGroupSnapshot")
+	proto.RegisterType((*DeleteVolumeGroupSnapshotRequest)(nil), "csi.v1.DeleteVolumeGroupSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeGroupSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*DeleteVolumeGroupSnapshotResponse)(nil), "csi.v1.DeleteVolumeGroupSnapshotResponse")
+	proto.RegisterType((*GetVolumeGroupSnapshotRequest)(nil), "csi.v1.GetVolumeGroupSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetVolumeGroupSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*GetVolumeGroupSnapshotResponse)(nil), "csi.v1.GetVolumeGroupSnapshotResponse")
 	proto.RegisterExtension(E_AlphaEnum)
 	proto.RegisterExtension(E_AlphaEnumValue)
 	proto.RegisterExtension(E_CsiSecret)
@@ -5060,245 +5778,269 @@ func init() {
 }
 
 var fileDescriptor_9cdb00adce470e01 = []byte{
-	// 3796 bytes of a gzipped FileDescriptorProto
-	(opaque gzipped descriptor payload elided)
+	// 4182 bytes of a gzipped FileDescriptorProto
+	(opaque gzipped descriptor payload elided)
0xd9, + 0xc8, 0xaa, 0xd3, 0xca, 0x7f, 0x01, 0x59, 0x7a, 0x1c, 0x63, 0x82, 0x05, 0xe8, 0x63, 0xc8, 0x19, + 0x44, 0xe2, 0x20, 0x30, 0x12, 0xb5, 0x27, 0xde, 0x15, 0x8a, 0x6c, 0x9c, 0xf0, 0x2d, 0xe0, 0xdd, + 0x8b, 0x69, 0x1f, 0x9b, 0xe3, 0x5f, 0xbf, 0x3b, 0xd6, 0x1a, 0x84, 0x3f, 0x4c, 0xc1, 0x5a, 0x24, + 0x83, 0xc9, 0x60, 0x0f, 0x74, 0x10, 0x58, 0xc9, 0xdb, 0xe1, 0x1b, 0x3b, 0xc4, 0x3c, 0x72, 0x45, + 0xfc, 0xaf, 0x5f, 0xec, 0x30, 0x2b, 0x13, 0x1f, 0x66, 0xe8, 0x1c, 0xe9, 0xce, 0xfc, 0x24, 0x05, + 0x68, 0x1f, 0x9b, 0x4e, 0xaa, 0xcc, 0xb6, 0x34, 0xc6, 0xdf, 0x70, 0x2f, 0xe1, 0x6f, 0xbe, 0xe3, + 0xf3, 0x37, 0xd4, 0x63, 0xdd, 0xf6, 0x54, 0x4e, 0x02, 0x53, 0x27, 0xde, 0x96, 0x31, 0xe9, 0x29, + 0x8d, 0xf9, 0xc7, 0x4b, 0x4f, 0x2f, 0xe8, 0x56, 0xfe, 0x95, 0x83, 0x25, 0x9f, 0xd0, 0x4c, 0x83, + 0xee, 0x02, 0x92, 0xcf, 0x64, 0xa5, 0x27, 0x5b, 0x82, 0xd9, 0xe9, 0x3f, 0x83, 0x03, 0x16, 0x9d, + 0x1e, 0x7b, 0x18, 0x7a, 0x04, 0x4b, 0x7d, 0xf9, 0x85, 0xd2, 0x1f, 0xf6, 0x25, 0xb6, 0xcf, 0x86, + 0xf2, 0x7d, 0x1b, 0x38, 0x5c, 0x0b, 0x01, 0xe8, 0x35, 0xd5, 0x7c, 0xff, 0x5d, 0x8a, 0xa0, 0x2f, + 0xb2, 0x71, 0x4c, 0x79, 0x94, 0xef, 0x63, 0x74, 0x08, 0x4b, 0x7d, 0x45, 0x0d, 0x31, 0x4b, 0x8f, + 0x64, 0x46, 0x0d, 0x7c, 0x91, 0x0d, 0x76, 0x39, 0x0a, 0x82, 0x37, 0xe8, 0x65, 0xcb, 0x0d, 0x56, + 0x9a, 0x7a, 0xde, 0x60, 0x31, 0x44, 0xc3, 0xb6, 0x65, 0x3f, 0xb2, 0xda, 0x74, 0x33, 0x6c, 0x36, + 0xac, 0xf4, 0x12, 0x5b, 0x78, 0xfa, 0x9f, 0xb4, 0xd7, 0x82, 0x43, 0xd4, 0xe8, 0x43, 0x48, 0xeb, + 0x83, 0x0e, 0x33, 0xdf, 0x37, 0xc6, 0xe0, 0x5f, 0x12, 0x0f, 0x77, 0x0f, 0xa6, 0x44, 0x6b, 0x14, + 0xff, 0x47, 0x69, 0x48, 0x8b, 0x87, 0xbb, 0xe8, 0x63, 0x5f, 0x89, 0xe5, 0xce, 0x98, 0x5c, 0xbc, + 0x15, 0x96, 0x7f, 0x48, 0x45, 0x95, 0x58, 0x8a, 0xb0, 0xbc, 0x2b, 0x56, 0xcb, 0xed, 0xaa, 0xb4, + 0x57, 0xad, 0x57, 0xdb, 0x55, 0x89, 0x56, 0x89, 0x0a, 0x1c, 0x5a, 0x87, 0xe2, 0xe1, 0x51, 0xa5, + 0x5e, 0x6b, 0x1d, 0x48, 0x47, 0x0d, 0xfb, 0x2f, 0xd6, 0x9b, 0x42, 0x05, 0x98, 0xab, 0xd7, 0x5a, + 0x6d, 0xd6, 0xd0, 0x2a, 0xa4, 0xad, 0x96, 0xfd, 0x6a, 0x5b, 0xda, 0x2d, 0x1f, 0x96, 0x77, 0x6b, + 0xed, 0xa7, 0x85, 0x0c, 0xe2, 0x61, 0xd5, 0xcf, 0xbb, 0xd5, 0x28, 0x1f, 0xb6, 0x0e, 0x9a, 0xed, + 0x42, 0x16, 0x21, 0x58, 0x20, 0xe3, 0xed, 0xa6, 0x56, 0x21, 0x67, 0x71, 0xd8, 0xad, 0x37, 0x1b, + 0x8e, 0x0c, 0xd3, 0x68, 0x19, 0x0a, 0xf6, 0xcc, 0x62, 0xb5, 0xbc, 0x47, 0x00, 0xbd, 0x19, 0xb4, + 0x08, 0xf3, 0xd5, 0xef, 0x1d, 0x96, 0x1b, 0x7b, 0x36, 0xe1, 0x2c, 0xda, 0x84, 0x75, 0xaf, 0x38, + 0x12, 0x1b, 0x55, 0xdd, 0x23, 0xa0, 0x5c, 0xab, 0x00, 0xe8, 0x1a, 0x14, 0x58, 0x01, 0x6c, 0xb7, + 0xd9, 0xd8, 0xab, 0xb5, 0x6b, 0xcd, 0x46, 0x21, 0x4f, 0x11, 0xbc, 0x25, 0x00, 0x4b, 0x72, 0xc6, + 0x6c, 0x6e, 0x34, 0xac, 0x37, 0x4f, 0x61, 0x3d, 0x1b, 0xb1, 0xfe, 0x79, 0x0a, 0x56, 0x28, 0x64, + 0x6d, 0x03, 0xe4, 0xb6, 0xaf, 0xda, 0x86, 0x02, 0xc5, 0xbb, 0xa4, 0xe0, 0x2d, 0xb0, 0x40, 0xdb, + 0x9f, 0xd8, 0x79, 0x87, 0x5d, 0x5e, 0x4a, 0x79, 0xca, 0x4b, 0xb5, 0x60, 0x16, 0x76, 0xdb, 0x5f, + 0x88, 0x09, 0xcc, 0x96, 0x94, 0xd8, 0x3f, 0x8e, 0x48, 0x13, 0xee, 0x26, 0x73, 0x4b, 0x0a, 0xa1, + 0x2e, 0x92, 0xc5, 0x5f, 0xd0, 0xcb, 0x3d, 0x84, 0xd5, 0xa0, 0xbc, 0xcc, 0xa0, 0xef, 0x84, 0xca, + 0x25, 0x8e, 0xdb, 0x75, 0x68, 0x1d, 0x0a, 0xe1, 0x87, 0x29, 0x98, 0xb1, 0x9b, 0xad, 0xf0, 0xc6, + 0xf2, 0x4b, 0x3e, 0xa4, 0x74, 0xd6, 0x6a, 0x71, 0x80, 0x57, 0x6f, 0xa1, 0x23, 0x15, 0x2c, 0x74, + 0x44, 0x9e, 0x73, 0x3a, 0xf2, 0x9c, 0xbf, 0x0d, 0xf3, 0x1d, 0x4b, 0x7c, 0x45, 0x53, 0x25, 0x53, + 0xe9, 0xdb, 0x40, 0x68, 0xb8, 0x30, 0xd9, 0xb6, 0x1f, 0x1c, 0x88, 0x73, 0xf6, 0x00, 0xab, 0x09, + 0x6d, 0xc2, 0x1c, 
0x29, 0x54, 0x4a, 0xa6, 0x26, 0x0d, 0x0d, 0x5c, 0xcc, 0x12, 0x58, 0x08, 0x48, + 0x5b, 0x5b, 0x3b, 0x32, 0x30, 0xba, 0x07, 0x8b, 0x04, 0xc4, 0x97, 0xbc, 0x32, 0xe7, 0x2c, 0x69, + 0x58, 0xd4, 0x44, 0x7a, 0x5b, 0x8e, 0xf4, 0xc2, 0x5f, 0x73, 0xb0, 0x42, 0xe1, 0xb1, 0xa0, 0xfe, + 0x8e, 0xaa, 0xf0, 0x78, 0x55, 0x34, 0x70, 0x7d, 0x46, 0x32, 0x7c, 0x55, 0xe8, 0x40, 0x11, 0x56, + 0x83, 0xf3, 0x31, 0x48, 0xe0, 0xa7, 0x29, 0x58, 0xb6, 0x62, 0x39, 0xbb, 0xe3, 0xb2, 0xc3, 0xed, + 0x09, 0x8e, 0x3e, 0xb0, 0x99, 0x99, 0xd0, 0x66, 0x1e, 0x04, 0x13, 0xee, 0x37, 0xbd, 0xd1, 0x68, + 0x70, 0x05, 0xaf, 0x6a, 0x2f, 0x7f, 0xcc, 0xc1, 0x4a, 0x60, 0x3e, 0x66, 0x60, 0x1f, 0x05, 0x33, + 0x88, 0x9b, 0x31, 0xf2, 0xbd, 0x54, 0x0e, 0xf1, 0x9e, 0x1d, 0xbb, 0x4f, 0x66, 0xc7, 0xff, 0x98, + 0x82, 0xeb, 0xee, 0x2d, 0x48, 0xde, 0x16, 0x74, 0x27, 0x80, 0xc0, 0x2e, 0x56, 0xc2, 0xff, 0x6e, + 0xd0, 0x43, 0xdf, 0x0f, 0x5f, 0xcc, 0x11, 0x22, 0x25, 0x79, 0xea, 0x48, 0xe4, 0x38, 0x33, 0x29, + 0x72, 0x7c, 0x21, 0x0d, 0xf8, 0x35, 0x2f, 0x28, 0xee, 0x17, 0x9f, 0x69, 0xc2, 0x98, 0xd5, 0xa5, + 0xf7, 0xe1, 0x2a, 0x49, 0x17, 0x9c, 0xd7, 0x33, 0x76, 0xc1, 0x9e, 0xfa, 0xd0, 0x19, 0x71, 0xc5, + 0xea, 0x76, 0xde, 0x83, 0xb0, 0x8a, 0x4a, 0x57, 0xf8, 0x2a, 0x03, 0xab, 0x56, 0x3a, 0xd1, 0x32, + 0xe5, 0xd3, 0x49, 0x6a, 0x0d, 0xbf, 0x14, 0x86, 0x6e, 0x53, 0xfe, 0x63, 0x89, 0xe6, 0x3a, 0x0e, + 0x62, 0x8b, 0x4a, 0xb0, 0x64, 0x98, 0xf2, 0x29, 0x71, 0x07, 0xb2, 0x7e, 0x8a, 0x4d, 0x69, 0x20, + 0x9b, 0xcf, 0x99, 0xad, 0x2f, 0xb2, 0xae, 0x36, 0xe9, 0x39, 0x94, 0xcd, 0xe7, 0x97, 0x74, 0x90, + 0xe8, 0x3b, 0x41, 0xa7, 0xf0, 0xd6, 0x88, 0xb5, 0x24, 0xe8, 0xd6, 0xf7, 0x62, 0xe0, 0xfd, 0x77, + 0x46, 0xb0, 0x1c, 0x0d, 0xeb, 0x5f, 0x1c, 0xce, 0xfe, 0x9a, 0x2b, 0x03, 0xd7, 0xe0, 0x6a, 0x68, + 0xf1, 0xec, 0x0a, 0x39, 0x85, 0xa2, 0xd5, 0x75, 0xa4, 0x1a, 0x13, 0xaa, 0x63, 0x8c, 0xc6, 0xa4, + 0x62, 0x34, 0x46, 0x58, 0x83, 0x6b, 0x11, 0x13, 0x31, 0x29, 0xfe, 0x22, 0x4b, 0xc5, 0x98, 0xbc, + 0x48, 0xf5, 0x69, 0x9c, 0x55, 0xbc, 0xeb, 0x3d, 0xf6, 0xc8, 0x7a, 0xce, 0xab, 0xb0, 0x8b, 0x1b, + 0x90, 0xf7, 0xd2, 0xb1, 0x6b, 0xd0, 0x1c, 0x61, 0x38, 0xd9, 0x0b, 0xd5, 0xce, 0x72, 0x81, 0xda, + 0x59, 0xdd, 0x35, 0xaa, 0x69, 0x7f, 0x2c, 0x1c, 0xbb, 0x15, 0x09, 0x66, 0xf5, 0x2c, 0x64, 0x56, + 0x33, 0xfe, 0x82, 0x5c, 0x2c, 0xd3, 0x5f, 0x00, 0xc3, 0x62, 0x4a, 0x1d, 0x59, 0x29, 0x13, 0x9e, + 0x01, 0x4f, 0x35, 0x7e, 0xf2, 0xda, 0x55, 0x40, 0x8d, 0x52, 0x41, 0x35, 0x12, 0xae, 0xc3, 0x5a, + 0x24, 0x6f, 0x36, 0xf5, 0xef, 0x70, 0x54, 0x30, 0x07, 0x14, 0x6b, 0x99, 0xb2, 0x69, 0x8c, 0x3b, + 0x35, 0xeb, 0xf4, 0x4e, 0x4d, 0x9b, 0x88, 0x06, 0x4f, 0x68, 0x12, 0xc2, 0xef, 0x71, 0x74, 0x1f, + 0x82, 0xb2, 0xb0, 0xdb, 0xf6, 0x4d, 0xc8, 0x0e, 0x09, 0xee, 0x4f, 0xa3, 0xae, 0x25, 0xbf, 0x11, + 0x1c, 0x59, 0x5d, 0x22, 0xa5, 0xb8, 0x34, 0x24, 0x55, 0xf8, 0x29, 0x07, 0x79, 0x0f, 0x7f, 0xb4, + 0x0e, 0xb3, 0x0e, 0x54, 0x64, 0x27, 0x48, 0x4e, 0x83, 0x75, 0xfc, 0xa6, 0x66, 0xca, 0x3d, 0xf6, + 0x26, 0x85, 0x7e, 0x58, 0x39, 0xed, 0xd0, 0xc0, 0x34, 0x1c, 0x4e, 0x8b, 0xe4, 0x6f, 0x74, 0x07, + 0x32, 0x43, 0x55, 0x31, 0x89, 0xd9, 0x2f, 0x04, 0xed, 0x99, 0x4c, 0x55, 0x3a, 0x52, 0x15, 0x53, + 0x24, 0x54, 0xc2, 0x6d, 0xc8, 0x58, 0x5f, 0x7e, 0xc8, 0x62, 0x16, 0xb2, 0x95, 0xa7, 0xed, 0x6a, + 0xab, 0xc0, 0x21, 0x80, 0x5c, 0x8d, 0x26, 0xf8, 0x29, 0xa1, 0x6e, 0xbf, 0x4b, 0x75, 0x16, 0x61, + 0xb9, 0x00, 0xf9, 0x58, 0xd5, 0xf4, 0xbe, 0xdc, 0x23, 0x32, 0xcf, 0x88, 0xce, 0x77, 0x7c, 0x39, + 0x85, 0x82, 0x8f, 0xeb, 0xce, 0x89, 0x44, 0x01, 0x4c, 0x9f, 0x51, 0xdd, 0x8a, 0x83, 0x96, 0xca, + 0x91, 0xd0, 0xd2, 0x75, 0xdf, 0x2d, 0x3b, 
0x02, 0x54, 0xfa, 0xdb, 0x14, 0xac, 0x44, 0xd2, 0xa1, + 0xf7, 0xbc, 0x70, 0xd2, 0x56, 0x22, 0x4f, 0x2f, 0x90, 0xf4, 0x15, 0x47, 0x81, 0xa4, 0x1d, 0x1f, + 0x90, 0xf4, 0xfa, 0xc8, 0xf1, 0x5e, 0x08, 0xe9, 0xc7, 0x5c, 0x0c, 0x84, 0xd4, 0x6a, 0x97, 0xf7, + 0xab, 0xd2, 0x51, 0x83, 0xfe, 0xeb, 0x40, 0x48, 0xcb, 0x50, 0x70, 0x81, 0x15, 0xa9, 0xd5, 0x2e, + 0x93, 0x47, 0xc6, 0x21, 0xf8, 0x26, 0x1d, 0x09, 0xce, 0x64, 0x46, 0xe3, 0x30, 0x59, 0x4a, 0xb2, + 0x0a, 0x88, 0x8d, 0x7e, 0xdc, 0x3c, 0x6a, 0xb4, 0x25, 0xf2, 0x84, 0xb9, 0x90, 0x73, 0xf0, 0x99, + 0x65, 0x40, 0xec, 0xb4, 0xbc, 0x2f, 0xf1, 0xff, 0x84, 0x83, 0x25, 0x5f, 0x33, 0x3b, 0x3c, 0x4f, + 0x51, 0x9c, 0xf3, 0x15, 0xc5, 0xef, 0xc1, 0xb2, 0x95, 0x31, 0x52, 0x4b, 0x31, 0xa4, 0x01, 0xd6, + 0x09, 0x18, 0xce, 0x74, 0x7e, 0xb1, 0x2f, 0xbf, 0x60, 0x05, 0x83, 0x43, 0xac, 0x5b, 0x8c, 0x2f, + 0x01, 0x12, 0x16, 0xbe, 0x4c, 0xd3, 0xb8, 0x64, 0xe2, 0xbc, 0x66, 0xa4, 0x8f, 0x0a, 0x27, 0x3e, + 0xe9, 0x09, 0x12, 0x9f, 0x18, 0x0f, 0x97, 0x99, 0x28, 0x18, 0x9e, 0xfc, 0x4e, 0x6f, 0xb8, 0xf7, + 0x36, 0x8d, 0x5c, 0xef, 0x78, 0xf5, 0x77, 0x64, 0xa6, 0x95, 0xfb, 0xb2, 0xc2, 0xfd, 0xe8, 0xb2, + 0xf2, 0xe4, 0x32, 0x8d, 0xc7, 0x2e, 0x90, 0x1f, 0x09, 0x77, 0xe0, 0x16, 0x79, 0x56, 0x39, 0x0a, + 0xd0, 0xa6, 0x2e, 0xe9, 0x57, 0xe1, 0xf5, 0x51, 0xd4, 0x6c, 0xfa, 0x7a, 0xa4, 0xff, 0x71, 0x6a, + 0x5b, 0x01, 0x2e, 0x23, 0x5c, 0x11, 0x9d, 0xfc, 0xb7, 0x53, 0xb0, 0x39, 0x6a, 0x1c, 0xfa, 0xd8, + 0xeb, 0x9a, 0xee, 0x8c, 0x3b, 0x9d, 0xd7, 0x4b, 0xfd, 0x01, 0xf3, 0x52, 0x55, 0x9f, 0x97, 0x7a, + 0x67, 0x12, 0x56, 0x5e, 0x87, 0x55, 0x8d, 0xf2, 0x57, 0x6f, 0xc3, 0x1b, 0x7e, 0x58, 0xda, 0xe3, + 0xa3, 0xe8, 0xaf, 0x1f, 0x1c, 0x9c, 0x9a, 0x23, 0x0e, 0x66, 0xc7, 0x87, 0xf6, 0xfe, 0x7e, 0x1a, + 0x36, 0xbd, 0x0f, 0x94, 0xf7, 0xbd, 0x68, 0x5a, 0xd2, 0xaf, 0x05, 0x6e, 0xc3, 0x62, 0x10, 0x29, + 0xb2, 0x1f, 0xe4, 0x5e, 0xf1, 0x43, 0x45, 0x46, 0xd2, 0x03, 0x9c, 0x11, 0x53, 0x27, 0xe7, 0x7f, + 0x61, 0x14, 0xf8, 0x9b, 0x63, 0x33, 0xfe, 0xff, 0x09, 0x08, 0x53, 0xf5, 0xec, 0xc1, 0x56, 0x82, + 0xfc, 0xcc, 0x2c, 0x2a, 0xb0, 0xe0, 0x07, 0x46, 0x99, 0xa6, 0x06, 0x5e, 0xa1, 0xfa, 0x07, 0xcf, + 0xfb, 0xd0, 0x52, 0x3a, 0xdb, 0x3f, 0x73, 0xf6, 0x83, 0x7d, 0x1f, 0xad, 0x75, 0xc2, 0x61, 0xe4, + 0x95, 0x2e, 0x22, 0x08, 0xba, 0xa2, 0x12, 0xcc, 0xda, 0x54, 0x46, 0xf0, 0x09, 0xa8, 0x33, 0xb9, + 0x4b, 0x12, 0x06, 0x8e, 0xd3, 0x17, 0x04, 0x8e, 0x33, 0x41, 0xe0, 0x98, 0xae, 0xed, 0x87, 0x29, + 0xd8, 0xf4, 0xbe, 0x95, 0x8c, 0x54, 0xef, 0x49, 0x16, 0xba, 0x05, 0x73, 0x1e, 0x2a, 0x5b, 0xe3, + 0xf3, 0x2e, 0xee, 0x99, 0xa4, 0xed, 0xa3, 0x24, 0x79, 0x45, 0x20, 0x28, 0xdd, 0x8a, 0x6d, 0xd8, + 0x4a, 0x98, 0x9f, 0x2a, 0x15, 0xa5, 0xfc, 0x41, 0x8a, 0xfc, 0xb6, 0xed, 0xff, 0x6e, 0xc7, 0xe2, + 0x81, 0xc7, 0x44, 0x31, 0x5e, 0xe9, 0x76, 0x29, 0xb0, 0x11, 0x37, 0xf9, 0x25, 0x1b, 0xe0, 0xfd, + 0xff, 0xe6, 0x60, 0xa6, 0xd6, 0xc5, 0xaa, 0x49, 0x83, 0x82, 0x79, 0xdf, 0xaf, 0x1c, 0xd1, 0x7a, + 0xcc, 0x8f, 0x1f, 0xc9, 0x16, 0xf0, 0xd7, 0x13, 0x7f, 0x1a, 0x29, 0x4c, 0xa1, 0x13, 0xcf, 0x2f, + 0x34, 0x7d, 0xcf, 0x05, 0x5e, 0x0b, 0x8d, 0x8c, 0xb8, 0xab, 0xf9, 0x5b, 0x23, 0xa8, 0x9c, 0x79, + 0xde, 0x87, 0x2c, 0xf9, 0xb1, 0x1a, 0x5a, 0x76, 0x7e, 0x30, 0xe7, 0xf9, 0x2d, 0x1b, 0xbf, 0x12, + 0x68, 0xb5, 0xc7, 0xdd, 0xff, 0xfb, 0x59, 0x00, 0xf7, 0x0e, 0x44, 0x8f, 0x60, 0xce, 0xeb, 0xfa, + 0xd0, 0x5a, 0xc2, 0xaf, 0xb5, 0xf8, 0xf5, 0xe8, 0x4e, 0x47, 0xa6, 0x47, 0x30, 0xe7, 0x55, 0x79, + 0x97, 0x59, 0xc4, 0x63, 0x6d, 0x97, 0x59, 0xe4, 0xdb, 0xea, 0x29, 0xd4, 0x83, 0xab, 0x31, 0x4f, + 0x65, 0xd1, 0xeb, 0xe3, 0x3d, 0x28, 0xe6, 0xdf, 0x18, 0xf3, 0xcd, 
0xad, 0x30, 0x85, 0x74, 0xb8, + 0x16, 0xfb, 0x42, 0x14, 0x6d, 0x8f, 0xfb, 0x7e, 0x95, 0x7f, 0x73, 0x0c, 0x4a, 0x67, 0xce, 0x21, + 0xf0, 0xf1, 0xcf, 0xd2, 0xd0, 0x9b, 0x63, 0xbf, 0x97, 0xe4, 0x6f, 0x8f, 0xff, 0xca, 0x4d, 0x98, + 0x42, 0x07, 0x90, 0xf7, 0xbc, 0x4f, 0x42, 0x7c, 0xe4, 0xa3, 0x25, 0xca, 0x78, 0x2d, 0xe1, 0x41, + 0x13, 0xe5, 0xe4, 0x79, 0x32, 0xe2, 0x72, 0x0a, 0x3f, 0x7e, 0x71, 0x39, 0x45, 0xbc, 0x31, 0x09, + 0x6e, 0x7f, 0x20, 0x30, 0x8d, 0xda, 0xfe, 0xe8, 0x48, 0x37, 0x6a, 0xfb, 0x63, 0xa2, 0x5c, 0x61, + 0x0a, 0x7d, 0x17, 0x16, 0xfc, 0xb5, 0x60, 0x74, 0x3d, 0xb1, 0xa6, 0xcd, 0x6f, 0xc4, 0x75, 0x7b, + 0x59, 0xfa, 0x2b, 0x89, 0x2e, 0xcb, 0xc8, 0x8a, 0xa6, 0xcb, 0x32, 0xa6, 0x00, 0x39, 0x65, 0xf9, + 0x27, 0x5f, 0x7d, 0xcc, 0xf5, 0x4f, 0x51, 0x65, 0x3d, 0xd7, 0x3f, 0x45, 0x16, 0xd5, 0x84, 0x29, + 0xa4, 0xc0, 0x6a, 0x74, 0x79, 0x06, 0xdd, 0x1a, 0xab, 0xfa, 0xc4, 0xbf, 0x3e, 0x8a, 0xcc, 0x99, + 0xaa, 0x03, 0x4b, 0x11, 0xcf, 0xc7, 0x90, 0x90, 0xf8, 0xb6, 0x8c, 0x4e, 0x72, 0x73, 0x8c, 0xf7, + 0x67, 0x02, 0x71, 0xe6, 0xff, 0x95, 0x86, 0x2b, 0x81, 0xc0, 0x1e, 0xfd, 0x06, 0x07, 0x1b, 0xc9, + 0xc9, 0x0e, 0xba, 0x1b, 0x93, 0x14, 0xc4, 0x28, 0x56, 0x69, 0x5c, 0x72, 0x8f, 0x71, 0x5f, 0x8b, + 0x8d, 0x29, 0xd1, 0xf6, 0xb8, 0x61, 0xb3, 0x47, 0xa3, 0x47, 0x05, 0xa8, 0x64, 0x3b, 0xac, 0x69, + 0x63, 0xa3, 0x0e, 0xb4, 0x3d, 0x6e, 0x60, 0xe4, 0x4e, 0x3b, 0x32, 0x84, 0xa1, 0xd3, 0xf6, 0x60, + 0x35, 0xfa, 0xf6, 0x46, 0xb7, 0xc6, 0x0a, 0x2d, 0x5c, 0xad, 0x4a, 0x0e, 0x02, 0xc8, 0x6c, 0x24, + 0xad, 0xba, 0xff, 0x2f, 0x59, 0xc8, 0x10, 0xa0, 0xa4, 0x0d, 0x57, 0x02, 0xc5, 0x17, 0xb4, 0x91, + 0x5c, 0x92, 0xe2, 0x6f, 0xc4, 0xf6, 0x3b, 0xe7, 0xf7, 0x0c, 0x16, 0x43, 0xe5, 0x14, 0xb4, 0xe9, + 0x1d, 0x17, 0x55, 0xd2, 0xe1, 0xb7, 0x12, 0x28, 0x82, 0xbc, 0xfd, 0x97, 0xda, 0xe6, 0x28, 0xbc, + 0xdf, 0xcf, 0x3b, 0xee, 0x22, 0xfb, 0x8c, 0xe2, 0x52, 0xc1, 0x2b, 0x4c, 0xf0, 0xcb, 0x15, 0x79, + 0x79, 0xdd, 0x4c, 0xa4, 0x71, 0x66, 0xf8, 0xd4, 0x01, 0xc4, 0x3c, 0x70, 0x33, 0xf2, 0x09, 0x17, + 0x09, 0x8b, 0xf3, 0x42, 0x12, 0x89, 0xc3, 0xfe, 0x13, 0x28, 0x04, 0x91, 0x11, 0x74, 0x63, 0x04, + 0x50, 0xc3, 0x6f, 0xc6, 0x13, 0x04, 0x77, 0x26, 0xe8, 0x09, 0x82, 0x52, 0x45, 0x99, 0xff, 0xcd, + 0x44, 0x1a, 0xef, 0x7d, 0xe8, 0xc1, 0x04, 0xdd, 0xfb, 0x30, 0x8c, 0x1f, 0xba, 0xf7, 0x61, 0x04, + 0x88, 0x28, 0x4c, 0xed, 0x3c, 0x00, 0x90, 0x7b, 0x83, 0xe7, 0xb2, 0x84, 0xd5, 0x61, 0x1f, 0xad, + 0x87, 0xd2, 0xb4, 0xaa, 0x3a, 0xec, 0x37, 0x07, 0x56, 0x76, 0x66, 0x14, 0xff, 0x6c, 0x86, 0xe4, + 0x62, 0xb3, 0x64, 0x80, 0xd5, 0xb1, 0x53, 0x87, 0x82, 0x3b, 0x5a, 0x22, 0x81, 0x36, 0xda, 0x8a, + 0xe4, 0x41, 0x5e, 0x4b, 0x06, 0x18, 0x2d, 0x38, 0x8c, 0x48, 0xef, 0xce, 0x47, 0x00, 0x1d, 0x43, + 0x91, 0x68, 0xa4, 0x8f, 0xae, 0x87, 0xf8, 0x3c, 0x54, 0x70, 0xaf, 0x6b, 0xf3, 0xf8, 0x53, 0x26, + 0x4c, 0xc7, 0x50, 0x68, 0x3e, 0xb0, 0xf3, 0x6d, 0xc8, 0x53, 0x61, 0x4e, 0x2c, 0xba, 0x51, 0xe3, + 0x99, 0x0c, 0x74, 0xf5, 0xa4, 0x67, 0xa7, 0x0a, 0xf3, 0x94, 0x01, 0x83, 0xd8, 0xd1, 0x8d, 0x10, + 0x8b, 0xc7, 0xb4, 0x27, 0xc0, 0x64, 0x8e, 0x0c, 0x63, 0x7d, 0x3b, 0x15, 0x98, 0xb3, 0xd9, 0x98, + 0xcf, 0xb5, 0x2e, 0xda, 0x88, 0xe0, 0x62, 0x75, 0x04, 0x98, 0xe4, 0x19, 0x13, 0xab, 0xcb, 0x15, + 0xc5, 0xfe, 0x3f, 0x3e, 0xc2, 0xa2, 0x30, 0x54, 0x29, 0x52, 0x14, 0xd6, 0x57, 0xc9, 0x3e, 0x4b, + 0x77, 0x0c, 0xe5, 0x38, 0x47, 0x06, 0x7d, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x79, + 0xd8, 0xd8, 0x90, 0x46, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -5957,6 +6699,186 @@ var _Controller_serviceDesc = grpc.ServiceDesc{ Metadata: "github.com/container-storage-interface/spec/csi.proto", } +// GroupControllerClient is the client API for GroupController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupControllerClient interface { + GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) + CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) + DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) + GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) +} + +type groupControllerClient struct { + cc *grpc.ClientConn +} + +func NewGroupControllerClient(cc *grpc.ClientConn) GroupControllerClient { + return &groupControllerClient{cc} +} + +func (c *groupControllerClient) GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) { + out := new(GroupControllerGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.GroupController/GroupControllerGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupControllerClient) CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) { + out := new(CreateVolumeGroupSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.GroupController/CreateVolumeGroupSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupControllerClient) DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) { + out := new(DeleteVolumeGroupSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.GroupController/DeleteVolumeGroupSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupControllerClient) GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) { + out := new(GetVolumeGroupSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.GroupController/GetVolumeGroupSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupControllerServer is the server API for GroupController service. +type GroupControllerServer interface { + GroupControllerGetCapabilities(context.Context, *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) + CreateVolumeGroupSnapshot(context.Context, *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) + DeleteVolumeGroupSnapshot(context.Context, *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) + GetVolumeGroupSnapshot(context.Context, *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) +} + +// UnimplementedGroupControllerServer can be embedded to have forward compatible implementations. 
+type UnimplementedGroupControllerServer struct { +} + +func (*UnimplementedGroupControllerServer) GroupControllerGetCapabilities(ctx context.Context, req *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GroupControllerGetCapabilities not implemented") +} +func (*UnimplementedGroupControllerServer) CreateVolumeGroupSnapshot(ctx context.Context, req *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolumeGroupSnapshot not implemented") +} +func (*UnimplementedGroupControllerServer) DeleteVolumeGroupSnapshot(ctx context.Context, req *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVolumeGroupSnapshot not implemented") +} +func (*UnimplementedGroupControllerServer) GetVolumeGroupSnapshot(ctx context.Context, req *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVolumeGroupSnapshot not implemented") +} + +func RegisterGroupControllerServer(s *grpc.Server, srv GroupControllerServer) { + s.RegisterService(&_GroupController_serviceDesc, srv) +} + +func _GroupController_GroupControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GroupControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.GroupController/GroupControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, req.(*GroupControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupController_CreateVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeGroupSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.GroupController/CreateVolumeGroupSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, req.(*CreateVolumeGroupSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupController_DeleteVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeGroupSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.GroupController/DeleteVolumeGroupSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, 
req.(*DeleteVolumeGroupSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupController_GetVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVolumeGroupSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.GroupController/GetVolumeGroupSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, req.(*GetVolumeGroupSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.GroupController", + HandlerType: (*GroupControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GroupControllerGetCapabilities", + Handler: _GroupController_GroupControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateVolumeGroupSnapshot", + Handler: _GroupController_CreateVolumeGroupSnapshot_Handler, + }, + { + MethodName: "DeleteVolumeGroupSnapshot", + Handler: _GroupController_DeleteVolumeGroupSnapshot_Handler, + }, + { + MethodName: "GetVolumeGroupSnapshot", + Handler: _GroupController_GetVolumeGroupSnapshot_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + // NodeClient is the client API for Node service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
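For orientation, the GroupController service above is the new client/server API surface added to the vendored CSI spec in this update. A minimal sketch of how a caller might probe it follows; the import path, socket endpoint, and dial options are illustrative assumptions, not part of this patch:

```go
package main

import (
	"context"
	"log"
	"time"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// CSI plugins are conventionally served over a unix socket; the exact
	// path here is an assumption for this example.
	conn, err := grpc.Dial("unix:///csi/csi.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial CSI endpoint: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Ask the plugin which group-controller capabilities it supports
	// before attempting any volume group snapshot operations.
	client := csi.NewGroupControllerClient(conn)
	resp, err := client.GroupControllerGetCapabilities(ctx,
		&csi.GroupControllerGetCapabilitiesRequest{})
	if err != nil {
		log.Fatalf("GroupControllerGetCapabilities: %v", err)
	}
	for _, c := range resp.GetCapabilities() {
		log.Printf("supported capability: %v", c)
	}
}
```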
diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitattributes b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitattributes new file mode 100644 index 000000000000..d207b1802b20 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitignore b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitignore index ea58090bd21e..88ceb2764bd1 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitignore +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.gitignore @@ -1,4 +1,5 @@ # Binaries for programs and plugins +/bin/ *.exe *.dll *.so @@ -9,3 +10,4 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +coverage.txt diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.golangci.yml b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.golangci.yml new file mode 100644 index 000000000000..6462e52f66ff --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/.golangci.yml @@ -0,0 +1,52 @@ +linters: + enable: + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - vet + - unused + - misspell + disable: + - errcheck + +linters-settings: + revive: + ignore-generated-headers: true + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + arguments: [["UID", "GID"], []] + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + +issues: + include: + - EXC0002 + +run: + timeout: 8m + skip-dirs: + - example diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/Makefile b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/Makefile new file mode 100644 index 000000000000..c3a497dcac01 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/Makefile @@ -0,0 +1,180 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Go command to use for build +GO ?= go +INSTALL ?= install + +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +WHALE = "🇩" +ONI = "👹" + +# Project binaries. +COMMANDS=protoc-gen-go-ttrpc protoc-gen-gogottrpc + +ifdef BUILDTAGS + GO_BUILDTAGS = ${BUILDTAGS} +endif +GO_BUILDTAGS ?= +GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) + +# Project packages. +PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... 
| grep -v /example)
+TESTPACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /cmd | grep -v /integration | grep -v /example)
+BINPACKAGES=$(addprefix ./cmd/,$(COMMANDS))
+
+# Replaces ":" (*nix), ";" (windows) with newline for easy parsing
+GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n")
+
+TESTFLAGS_RACE=
+GO_BUILD_FLAGS=
+# See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809
+GO_GCFLAGS=$(shell \
+	set -- ${GOPATHS}; \
+	echo "-gcflags=-trimpath=$${1}/src"; \
+	)
+
+BINARIES=$(addprefix bin/,$(COMMANDS))
+
+# Flags passed to `go test`
+TESTFLAGS ?= $(TESTFLAGS_RACE) $(EXTRA_TESTFLAGS)
+TESTFLAGS_PARALLEL ?= 8
+
+# Use this to replace `go test` with, for instance, `gotestsum`
+GOTEST ?= $(GO) test
+
+.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install vendor install-protobuf install-protobuild
+.DEFAULT: default
+
+# Forcibly set the default goal to all, in case an include above brought in a rule definition.
+.DEFAULT_GOAL := all
+
+all: binaries
+
+check: proto-fmt ## run all linters
+	@echo "$(WHALE) $@"
+	GOGC=75 golangci-lint run
+
+ci: check binaries check-protos coverage # coverage-integration ## to be used by the CI
+
+AUTHORS: .mailmap .git/HEAD
+	git log --format='%aN <%aE>' | sort -fu > $@
+
+generate: protos
+	@echo "$(WHALE) $@"
+	@PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES}
+
+protos: bin/protoc-gen-gogottrpc bin/protoc-gen-go-ttrpc ## generate protobuf
+	@echo "$(WHALE) $@"
+	@(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES})
+
+check-protos: protos ## check if protobufs need to be generated again
+	@echo "$(WHALE) $@"
+	@test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \
+		((git diff | cat) && \
+		(echo "$(ONI) please run 'make protos' when making changes to proto files" && false))
+
+check-api-descriptors: protos ## check that protobuf changes aren't present.
+	@echo "$(WHALE) $@"
+	@test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \
+		((git diff $$(find . -name '*.pb.txt') | cat) && \
+		(echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false))
+
+proto-fmt: ## check format of proto files
+	@echo "$(WHALE) $@"
+	@test -z "$$(find . -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
+		(echo "$(ONI) please indent proto files with tabs only" && false)
+	@test -z "$$(find . -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
+		(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)
+
+build: ## build the go packages
+	@echo "$(WHALE) $@"
+	@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${EXTRA_FLAGS} ${PACKAGES}
+
+test: ## run tests, except integration tests and tests that require root
+	@echo "$(WHALE) $@"
+	@$(GOTEST) ${TESTFLAGS} ${TESTPACKAGES}
+
+integration: ## run integration tests
+	@echo "$(WHALE) $@"
+	@cd "${ROOTDIR}/integration" && $(GOTEST) -v ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} .
+
+benchmark: ## run benchmark tests
+	@echo "$(WHALE) $@"
+	@$(GO) test ${TESTFLAGS} -bench . -run Benchmark
+
+FORCE:
+
+define BUILD_BINARY
+@echo "$(WHALE) $@"
+@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_TAGS} ./$<
+endef
+
+# Build a binary from a cmd.
+bin/%: cmd/% FORCE
+	$(call BUILD_BINARY)
+
+binaries: $(BINARIES) ## build binaries
+	@echo "$(WHALE) $@"
+
+clean: ## clean up binaries
+	@echo "$(WHALE) $@"
+	@rm -f $(BINARIES)
+
+install: ## install binaries
+	@echo "$(WHALE) $@ $(BINPACKAGES)"
+	@$(GO) install $(BINPACKAGES)
+
+install-protobuf:
+	@echo "$(WHALE) $@"
+	@script/install-protobuf
+
+install-protobuild:
+	@echo "$(WHALE) $@"
+	@$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1
+	@$(GO) install github.com/containerd/protobuild@14832ccc41429f5c4f81028e5af08aa233a219cf
+
+coverage: ## generate coverprofiles from the unit tests, except tests that require root
+	@echo "$(WHALE) $@"
+	@rm -f coverage.txt
+	@$(GO) test ${TESTFLAGS} ${TESTPACKAGES} 2> /dev/null
+	@( for pkg in ${PACKAGES}; do \
+		$(GO) test ${TESTFLAGS} \
+			-cover \
+			-coverprofile=profile.out \
+			-covermode=atomic $$pkg || exit; \
+		if [ -f profile.out ]; then \
+			cat profile.out >> coverage.txt; \
+			rm profile.out; \
+		fi; \
+	done )
+
+vendor: ## ensure all the go.mod/go.sum files are up-to-date
+	@echo "$(WHALE) $@"
+	@$(GO) mod tidy
+	@$(GO) mod verify
+
+verify-vendor: ## verify that all the go.mod/go.sum files are up-to-date
+	@echo "$(WHALE) $@"
+	@$(GO) mod tidy
+	@$(GO) mod verify
+	@test -z "$$(git status --short | grep "go.sum" | tee /dev/stderr)" || \
+		((git diff | cat) && \
+		(echo "$(ONI) make sure to check in changes after go mod tidy" && false))
+
+help: ## this help
+	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/PROTOCOL.md b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/PROTOCOL.md
new file mode 100644
index 000000000000..12b43f6bd6e3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/PROTOCOL.md
@@ -0,0 +1,240 @@
+# Protocol Specification
+
+The ttrpc protocol is a client/server protocol supporting multiple request
+streams over a single connection with lightweight framing. The client
+represents the process which initiated the underlying connection and the server
+is the process which accepted the connection. The protocol is currently defined
+as asymmetrical, with clients sending requests and servers sending responses.
+Both clients and servers are able to send stream data. The roles are also used
+in determining the stream identifiers, with client initiated streams using odd
+number identifiers and server initiated streams using even numbers. The
+protocol may be extended in the future to support server initiated streams, but
+that is not supported in the latest version.
+
+## Purpose
+
+The ttrpc protocol is designed to be lightweight and optimized for low latency
+and reliable connections between processes on the same host. The protocol does
+not include features for handling unreliable connections such as handshakes,
+resets, pings, or flow control. The protocol is designed to make low-overhead
+implementations as simple as possible. It is not intended as a suitable
+replacement for HTTP2/3 over the network.
+
+## Message Frame
+
+Each Message Frame consists of a 10-byte message header followed
+by message data. The data length and stream ID are both big-endian
+4-byte unsigned integers. The message type is an unsigned 1-byte
+integer. The flags are also an unsigned 1-byte integer, whose
+use is defined by the message type.
+
+    +---------------------------------------------------------------+
+    |                        Data Length (32)                       |
+    +---------------------------------------------------------------+
+    |                         Stream ID (32)                        |
+    +---------------+-----------------------------------------------+
+    | Msg Type (8)  |
+    +---------------+
+    |   Flags (8)   |
+    +---------------+-----------------------------------------------+
+    |                            Data (*)                           |
+    +---------------------------------------------------------------+
+
+The Data Length field represents the number of bytes in the Data field. The
+total frame size will always be Data Length + 10 bytes. The maximum data length
+is 4MB and any larger size should be rejected. Due to the maximum data size
+being less than 16MB, the first frame byte should always be zero. This first
+byte should be considered reserved for future use.
+
+The Stream ID must be odd for client initiated streams and even for server
+initiated streams. Server initiated streams are not currently supported.
+
+## Message Types
+
+| Message Type | Name     | Description                      |
+|--------------|----------|----------------------------------|
+| 0x01         | Request  | Initiates stream                 |
+| 0x02         | Response | Final stream data and terminates |
+| 0x03         | Data     | Stream data                      |
+
+### Request
+
+The request message is used to initiate a stream and send along request data
+for properly routing and handling the stream. The stream may indicate unary,
+with no inbound or outbound stream data and only a response expected on the
+stream. The request may also indicate the stream is still open for more data
+and no response is expected until data is finished. If the remote indicates the
+stream is closed, the request may be considered non-unary but without any more
+stream data sent. In the case of `remote closed`, the remote still expects to
+receive a response or stream data. For compatibility with non-streaming
+clients, a request with empty flags indicates a unary request.
+
+#### Request Flags
+
+| Flag | Name            | Description                                      |
+|------|-----------------|--------------------------------------------------|
+| 0x01 | `remote closed` | Non-unary, but no more data expected from remote |
+| 0x02 | `remote open`   | Non-unary, remote is still sending data          |
+
+### Response
+
+The response message is used to end a stream with data, an empty response, or
+an error. A response message is the only expected message after a unary request.
+A non-unary request does not require a response message if the server is sending
+back stream data. A non-unary stream may return a single response message but no
+other stream data may follow.
+
+#### Response Flags
+
+No response flags are defined at this time; flags should be empty.
+
+### Data
+
+The data message is used to send data on an already initialized stream. Either
+client or server may send data. A data message is not allowed on a unary stream.
+A data message should not be sent after indicating `remote closed` to the peer.
+The last data message on a stream must set the `remote closed` flag.
+
+The `no data` flag is used to indicate that the data message does not include
+any data. This is normally used with the `remote closed` flag to indicate the
+stream is now closed without transmitting any data. Since ttrpc normally
+transmits a single object per message, a zero length data message may be
+interpreted as an empty object. For example, transmitting the number zero as a
+protobuf message ends up with a data length of zero, but the message is still
+considered data and should be processed.
+
+#### Data Flags
+
+| Flag | Name            | Description                       |
+|------|-----------------|-----------------------------------|
+| 0x01 | `remote closed` | No more data expected from remote |
+| 0x04 | `no data`       | This message does not have data   |
+
+## Streaming
+
+All ttrpc requests use streams to transfer data. Unary streams will only have
+two messages sent per stream, a request from a client and a response from the
+server. Non-unary streams, however, may send any number of messages from the
+client and the server. This makes stream management more complicated than unary
+streams since both client and server need to track additional state. To keep
+this management as simple as possible, ttrpc minimizes the number of states and
+uses two flags instead of control frames. Each stream has two states while a
+stream is still alive: `local closed` and `remote closed`. Each peer considers
+local and remote from their own perspective and sets flags from the other peer's
+perspective. For example, if a client sends a data frame with the
+`remote closed` flag, that indicates that the client is now `local closed`
+and the server will be `remote closed`. A unary operation does not need to send
+these flags since each received message always indicates `remote closed`. Once a
+peer is both `local closed` and `remote closed`, the stream is considered
+finished and may be cleaned up.
+
+Due to the asymmetric nature of the current protocol, a client should
+always be in the `local closed` state before `remote closed` and a server should
+always be in the `remote closed` state before `local closed`. This happens
+because the client always initiates requests and always expects a final
+response back from the server to indicate the initiated request has been
+fulfilled. This may mean the server sends a final empty response to finish a
+stream even after it has already completed sending data before the client.
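+
+As a non-normative illustration of the framing above, the sketch below packs a
+10-byte header in Go; the helper name is invented for this example and is not
+part of any ttrpc API:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// encodeHeader packs the fixed 10-byte message header: big-endian data
+// length and stream ID, followed by the message type and flags bytes.
+func encodeHeader(dataLen, streamID uint32, msgType, flags uint8) [10]byte {
+	var h [10]byte
+	binary.BigEndian.PutUint32(h[0:4], dataLen)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	h[8] = msgType
+	h[9] = flags
+	return h
+}
+
+func main() {
+	// Close the send side of client-initiated stream 1: a Data message
+	// (0x03) with `remote closed` (0x01) and `no data` (0x04) set and a
+	// zero-length payload.
+	h := encodeHeader(0, 1, 0x03, 0x01|0x04)
+	fmt.Printf("% x\n", h[:])
+}
+```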
+
+### Unary State Diagram
+
+      +--------+                                +--------+
+      | Client |                                | Server |
+      +---+----+                                +----+---+
+          |              +---------+                |
+   local  >--------------+ Request +----------------> remote
+   closed |              +---------+                | closed
+          |                                         |
+          |              +----------+               |
+ finished <--------------+ Response +---------------< finished
+          |              +----------+               |
+          |                                         |
+
+### Non-Unary State Diagrams
+
+RC: `remote closed` flag
+RO: `remote open` flag
+
+      +--------+                                +--------+
+      | Client |                                | Server |
+      +---+----+                                +----+---+
+          |          +--------------+               |
+          >----------+ Request [RO] +--------------->
+          |          +--------------+               |
+          |                                         |
+          |              +------+                   |
+          >--------------+ Data +------------------->
+          |              +------+                   |
+          |                                         |
+          |            +-----------+                |
+   local  >------------+ Data [RC] +----------------> remote
+   closed |            +-----------+                | closed
+          |                                         |
+          |              +----------+               |
+ finished <--------------+ Response +---------------< finished
+          |              +----------+               |
+          |                                         |
+
+      +--------+                                +--------+
+      | Client |                                | Server |
+      +---+----+                                +----+---+
+          |          +--------------+               |
+   local  >----------+ Request [RC] +---------------> remote
+   closed |          +--------------+               | closed
+          |                                         |
+          |              +------+                   |
+          <--------------+ Data +-------------------<
+          |              +------+                   |
+          |                                         |
+          |            +-----------+                |
+ finished <------------+ Data [RC] +----------------< finished
+          |            +-----------+                |
+          |                                         |
+
+      +--------+                                +--------+
+      | Client |                                | Server |
+      +---+----+                                +----+---+
+          |          +--------------+               |
+          >----------+ Request [RO] +--------------->
+          |          +--------------+               |
+          |                                         |
+          |              +------+                   |
+          >--------------+ Data +------------------->
+          |              +------+                   |
+          |                                         |
+          |              +------+                   |
+          <--------------+ Data +-------------------<
+          |              +------+                   |
+          |                                         |
+          |              +------+                   |
+          >--------------+ Data +------------------->
+          |              +------+                   |
+          |                                         |
+          |            +-----------+                |
+   local  >------------+ Data [RC] +----------------> remote
+   closed |            +-----------+                | closed
+          |                                         |
+          |              +------+                   |
+          <--------------+ Data +-------------------<
+          |              +------+                   |
+          |                                         |
+          |            +-----------+                |
+ finished <------------+ Data [RC] +----------------< finished
+          |            +-----------+                |
+          |                                         |
+
+## RPC
+
+While this protocol is defined primarily to support Remote Procedure Calls, the
+protocol does not define the request and response types beyond the messages
+defined in the protocol. The implementation provides a default protobuf
+definition of request and response which may be used for cross-language RPC.
+All implementations should at least define a request type which supports
+routing by procedure name and a response type which supports call status.
+
+## Version History
+
+| Version | Features            |
+|---------|---------------------|
+| 1.0     | Unary requests only |
+| 1.2     | Streaming support   |
diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/Protobuild.toml b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/Protobuild.toml
new file mode 100644
index 000000000000..0f6ccbd1e81a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/Protobuild.toml
@@ -0,0 +1,28 @@
+version = "2"
+generators = ["go"]
+
+# Control protoc include paths. Below are usually some good defaults, but feel
+# free to try it without them if it works for your project.
+[includes]
+  # Include paths that will be added before all others. Typically, you want to
+  # treat the root of the project as an include, but this may not be necessary.
+  before = ["."]
+
+  # Paths that will be added untouched to the end of the includes. We use
+  # `/usr/local/include` to pick up the common install location of protobuf.
+  # This is the default.
+  after = ["/usr/local/include"]
+
+# This section maps protobuf imports to Go packages. These will become
+# `-M` directives in the call to the go protobuf generator.
+[packages]
+  "google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
+  "proto/status.proto" = "google.golang.org/genproto/googleapis/rpc/status"
+
+[[overrides]]
+# enable ttrpc and disable fieldpath and grpc for the shim
+prefixes = ["github.com/containerd/ttrpc/integration/streaming"]
+generators = ["go", "go-ttrpc"]
+
+[overrides.parameters.go-ttrpc]
+prefix = "TTRPC"
diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/README.md b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/README.md
index 547a1297df7d..675a5179ef83 100644
--- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/README.md
+++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/README.md
@@ -1,7 +1,6 @@
 # ttrpc
 
 [![Build Status](https://github.com/containerd/ttrpc/workflows/CI/badge.svg)](https://github.com/containerd/ttrpc/actions?query=workflow%3ACI)
-[![codecov](https://codecov.io/gh/containerd/ttrpc/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/ttrpc)
 
 GRPC for low-memory environments.
 
@@ -20,13 +19,17 @@ Please note that while this project supports generating either end of the
 protocol, the generated service definitions will be incompatible with
 regular GRPC services, as they do not speak the same protocol.
 
+# Protocol
+
+See the [protocol specification](./PROTOCOL.md).
+
 # Usage
 
 Create a gogo vanity binary (see
 [`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
 example with the ttrpc plugin enabled).
 
-It's recommended to use [`protobuild`](https://github.com//stevvooe/protobuild)
+It's recommended to use [`protobuild`](https://github.com/containerd/protobuild)
 to build the protobufs for this project, but this will work with protoc
 directly, if required.
 
@@ -37,13 +40,11 @@ directly, if required.
 - The client and server interface are identical whereas in GRPC there is
   a client and server interface that are different.
 - The Go stdlib context package is used instead.
-- No support for streams yet.
 
 # Status
 
 TODO:
 
-- [ ] Document protocol layout
 - [ ] Add testing under concurrent load to ensure
 - [ ] Verify connection error handling
diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/channel.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/channel.go
index 81116a5e23fc..feafd9a6b57c 100644
--- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/channel.go
+++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/channel.go
@@ -38,6 +38,26 @@ type messageType uint8
 const (
 	messageTypeRequest  messageType = 0x1
 	messageTypeResponse messageType = 0x2
+	messageTypeData     messageType = 0x3
+)
+
+func (mt messageType) String() string {
+	switch mt {
+	case messageTypeRequest:
+		return "request"
+	case messageTypeResponse:
+		return "response"
+	case messageTypeData:
+		return "data"
+	default:
+		return "unknown"
+	}
+}
+
+const (
+	flagRemoteClosed uint8 = 0x1
+	flagRemoteOpen   uint8 = 0x2
+	flagNoData       uint8 = 0x4
 )
 
 // messageHeader represents the fixed-length message header of 10 bytes sent
@@ -46,7 +66,7 @@ type messageHeader struct {
 	Length   uint32      // length excluding this header.
b[:4] StreamID uint32 // identifies which request stream message is a part of. b[4:8] Type messageType // message type b[8] - Flags uint8 // reserved b[9] + Flags uint8 // type specific flags b[9] } func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) { @@ -111,22 +131,31 @@ func (ch *channel) recv() (messageHeader, []byte, error) { return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) } - p := ch.getmbuf(int(mh.Length)) - if _, err := io.ReadFull(ch.br, p); err != nil { - return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) + var p []byte + if mh.Length > 0 { + p = ch.getmbuf(int(mh.Length)) + if _, err := io.ReadFull(ch.br, p); err != nil { + return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) + } } return mh, p, nil } -func (ch *channel) send(streamID uint32, t messageType, p []byte) error { - if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { +func (ch *channel) send(streamID uint32, t messageType, flags uint8, p []byte) error { + // TODO: Error on send rather than on recv + //if len(p) > messageLengthMax { + // return status.Errorf(codes.InvalidArgument, "refusing to send, message length %v exceed maximum message size of %v", len(p), messageLengthMax) + //} + if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t, Flags: flags}); err != nil { return err } - _, err := ch.bw.Write(p) - if err != nil { - return err + if len(p) > 0 { + _, err := ch.bw.Write(p) + if err != nil { + return err + } } return ch.bw.Flush() diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/client.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/client.go index 26c3dd2a9819..4b1e1e709baa 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/client.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/client.go @@ -19,30 +19,30 @@ package ttrpc import ( "context" "errors" + "fmt" "io" "net" - "os" "strings" "sync" "syscall" "time" - "github.com/gogo/protobuf/proto" "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) -// ErrClosed is returned by client methods when the underlying connection is -// closed. 
-var ErrClosed = errors.New("ttrpc: closed") - // Client for a ttrpc server type Client struct { codec codec conn net.Conn channel *channel - calls chan *callRequest + + streamLock sync.RWMutex + streams map[streamID]*stream + nextStreamID streamID + sendLock sync.Mutex ctx context.Context closed func() @@ -51,8 +51,6 @@ type Client struct { userCloseFunc func() userCloseWaitCh chan struct{} - errOnce sync.Once - err error interceptor UnaryClientInterceptor } @@ -73,13 +71,16 @@ func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { } } +// NewClient creates a new ttrpc client using the given connection func NewClient(conn net.Conn, opts ...ClientOpts) *Client { ctx, cancel := context.WithCancel(context.Background()) + channel := newChannel(conn) c := &Client{ codec: codec{}, conn: conn, - channel: newChannel(conn), - calls: make(chan *callRequest), + channel: channel, + streams: make(map[streamID]*stream), + nextStreamID: 1, closed: cancel, ctx: ctx, userCloseFunc: func() {}, @@ -95,13 +96,13 @@ func NewClient(conn net.Conn, opts ...ClientOpts) *Client { return c } -type callRequest struct { - ctx context.Context - req *Request - resp *Response // response will be written back here - errs chan error // error written here on completion +func (c *Client) send(sid uint32, mt messageType, flags uint8, b []byte) error { + c.sendLock.Lock() + defer c.sendLock.Unlock() + return c.channel.send(sid, mt, flags, b) } +// Call makes a unary request and returns with response func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { payload, err := c.codec.Marshal(req) if err != nil { @@ -113,6 +114,7 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int Service: service, Method: method, Payload: payload, + // TODO: metadata from context } cresp = &Response{} @@ -123,7 +125,7 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int } if dl, ok := ctx.Deadline(); ok { - creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds() + creq.TimeoutNano = time.Until(dl).Nanoseconds() } info := &UnaryClientInfo{ @@ -143,36 +145,143 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int return nil } -func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { - errs := make(chan error, 1) - call := &callRequest{ - ctx: ctx, - req: req, - resp: resp, - errs: errs, +// StreamDesc describes the stream properties, whether the stream has +// a streaming client, a streaming server, or both +type StreamDesc struct { + StreamingClient bool + StreamingServer bool +} + +// ClientStream is used to send or recv messages on the underlying stream +type ClientStream interface { + CloseSend() error + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} + +type clientStream struct { + ctx context.Context + s *stream + c *Client + desc *StreamDesc + localClosed bool + remoteClosed bool +} + +func (cs *clientStream) CloseSend() error { + if !cs.desc.StreamingClient { + return fmt.Errorf("%w: cannot close non-streaming client", ErrProtocol) } + if cs.localClosed { + return ErrStreamClosed + } + err := cs.s.send(messageTypeData, flagRemoteClosed|flagNoData, nil) + if err != nil { + return filterCloseErr(err) + } + cs.localClosed = true + return nil +} - select { - case <-ctx.Done(): - return ctx.Err() - case c.calls <- call: - case <-c.ctx.Done(): - return c.error() +func (cs *clientStream) SendMsg(m interface{}) error { + if !cs.desc.StreamingClient { + return 
fmt.Errorf("%w: cannot send data from non-streaming client", ErrProtocol) + } + if cs.localClosed { + return ErrStreamClosed } - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-errs: + var ( + payload []byte + err error + ) + if m != nil { + payload, err = cs.c.codec.Marshal(m) + if err != nil { + return err + } + } + + err = cs.s.send(messageTypeData, 0, payload) + if err != nil { return filterCloseErr(err) - case <-c.ctx.Done(): - return c.error() } + + return nil +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.remoteClosed { + return io.EOF + } + + var msg *streamMessage + select { + case <-cs.ctx.Done(): + return cs.ctx.Err() + case <-cs.s.recvClose: + // If recv has a pending message, process that first + select { + case msg = <-cs.s.recv: + default: + return cs.s.recvErr + } + case msg = <-cs.s.recv: + } + + if msg.header.Type == messageTypeResponse { + resp := &Response{} + err := proto.Unmarshal(msg.payload[:msg.header.Length], resp) + // return the payload buffer for reuse + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + + if err := cs.c.codec.Unmarshal(resp.Payload, m); err != nil { + return err + } + + if resp.Status != nil && resp.Status.Code != int32(codes.OK) { + return status.ErrorProto(resp.Status) + } + + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + + return nil + } else if msg.header.Type == messageTypeData { + if !cs.desc.StreamingServer { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + return fmt.Errorf("received data from non-streaming server: %w", ErrProtocol) + } + if msg.header.Flags&flagRemoteClosed == flagRemoteClosed { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + + if msg.header.Flags&flagNoData == flagNoData { + return io.EOF + } + } + + err := cs.c.codec.Unmarshal(msg.payload[:msg.header.Length], m) + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + return nil + } + + return fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) } +// Close closes the ttrpc connection and underlying connection func (c *Client) Close() error { c.closeOnce.Do(func() { c.closed() + + c.conn.Close() }) return nil } @@ -188,194 +297,105 @@ func (c *Client) UserOnCloseWait(ctx context.Context) error { } } -type message struct { - messageHeader - p []byte - err error -} +func (c *Client) run() { + err := c.receiveLoop() + c.Close() + c.cleanupStreams(err) -// callMap provides access to a map of active calls, guarded by a mutex. -type callMap struct { - m sync.Mutex - activeCalls map[uint32]*callRequest - closeErr error + c.userCloseFunc() + close(c.userCloseWaitCh) } -// newCallMap returns a new callMap with an empty set of active calls. -func newCallMap() *callMap { - return &callMap{ - activeCalls: make(map[uint32]*callRequest), - } -} +func (c *Client) receiveLoop() error { + for { + select { + case <-c.ctx.Done(): + return ErrClosed + default: + var ( + msg = &streamMessage{} + err error + ) + + msg.header, msg.payload, err = c.channel.recv() + if err != nil { + _, ok := status.FromError(err) + if !ok { + // treat all errors that are not an rpc status as terminal. + // all others poison the connection. + return filterCloseErr(err) + } + } + sid := streamID(msg.header.StreamID) + s := c.getStream(sid) + if s == nil { + logrus.WithField("stream", sid).Errorf("ttrpc: received message on inactive stream") + continue + } -// set adds a call entry to the map with the given streamID key. 
-func (cm *callMap) set(streamID uint32, cr *callRequest) error { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return cm.closeErr + if err != nil { + s.closeWithError(err) + } else { + if err := s.receive(c.ctx, msg); err != nil { + logrus.WithError(err).WithField("stream", sid).Errorf("ttrpc: failed to handle message") + } + } + } } - cm.activeCalls[streamID] = cr - return nil } -// get looks up the call entry for the given streamID key, then removes it -// from the map and returns it. -func (cm *callMap) get(streamID uint32) (cr *callRequest, ok bool, err error) { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return nil, false, cm.closeErr - } - cr, ok = cm.activeCalls[streamID] - if ok { - delete(cm.activeCalls, streamID) - } - return -} +// createStream creates a new stream and registers it with the client +// Introduce stream types for multiple or single response +func (c *Client) createStream(flags uint8, b []byte) (*stream, error) { + c.streamLock.Lock() -// abort sends the given error to each active call, and clears the map. -// Once abort has been called, any subsequent calls to the callMap will return the error passed to abort. -func (cm *callMap) abort(err error) error { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return cm.closeErr - } - for streamID, call := range cm.activeCalls { - call.errs <- err - delete(cm.activeCalls, streamID) + // Check if closed since lock acquired to prevent adding + // anything after cleanup completes + select { + case <-c.ctx.Done(): + c.streamLock.Unlock() + return nil, ErrClosed + default: } - cm.closeErr = err - return nil -} -func (c *Client) run() { - var ( - waiters = newCallMap() - receiverDone = make(chan struct{}) - ) + // Stream ID should be allocated at same time + s := newStream(c.nextStreamID, c) + c.streams[s.id] = s + c.nextStreamID = c.nextStreamID + 2 - // Sender goroutine - // Receives calls from dispatch, adds them to the set of active calls, and sends them - // to the server. - go func() { - var streamID uint32 = 1 - for { - select { - case <-c.ctx.Done(): - return - case call := <-c.calls: - id := streamID - streamID += 2 // enforce odd client initiated request ids - if err := waiters.set(id, call); err != nil { - call.errs <- err // errs is buffered so should not block. - continue - } - if err := c.send(id, messageTypeRequest, call.req); err != nil { - call.errs <- err // errs is buffered so should not block. - waiters.get(id) // remove from waiters set - } - } - } - }() - - // Receiver goroutine - // Receives responses from the server, looks up the call info in the set of active calls, - // and notifies the caller of the response. - go func() { - defer close(receiverDone) - for { - select { - case <-c.ctx.Done(): - c.setError(c.ctx.Err()) - return - default: - mh, p, err := c.channel.recv() - if err != nil { - _, ok := status.FromError(err) - if !ok { - // treat all errors that are not an rpc status as terminal. - // all others poison the connection. 
- c.setError(filterCloseErr(err)) - return - } - } - msg := &message{ - messageHeader: mh, - p: p[:mh.Length], - err: err, - } - call, ok, err := waiters.get(mh.StreamID) - if err != nil { - logrus.Errorf("ttrpc: failed to look up active call: %s", err) - continue - } - if !ok { - logrus.Errorf("ttrpc: received message for unknown channel %v", mh.StreamID) - continue - } - call.errs <- c.recv(call.resp, msg) - } - } - }() - - defer func() { - c.conn.Close() - c.userCloseFunc() - close(c.userCloseWaitCh) - }() + c.sendLock.Lock() + defer c.sendLock.Unlock() + c.streamLock.Unlock() - for { - select { - case <-receiverDone: - // The receiver has exited. - // don't return out, let the close of the context trigger the abort of waiters - c.Close() - case <-c.ctx.Done(): - // Abort all active calls. This will also prevent any new calls from being added - // to waiters. - waiters.abort(c.error()) - return - } + if err := c.channel.send(uint32(s.id), messageTypeRequest, flags, b); err != nil { + return s, filterCloseErr(err) } -} -func (c *Client) error() error { - c.errOnce.Do(func() { - if c.err == nil { - c.err = ErrClosed - } - }) - return c.err + return s, nil } -func (c *Client) setError(err error) { - c.errOnce.Do(func() { - c.err = err - }) +func (c *Client) deleteStream(s *stream) { + c.streamLock.Lock() + delete(c.streams, s.id) + c.streamLock.Unlock() + s.closeWithError(nil) } -func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error { - p, err := c.codec.Marshal(msg) - if err != nil { - return err - } - - return c.channel.send(streamID, mtype, p) +func (c *Client) getStream(sid streamID) *stream { + c.streamLock.RLock() + s := c.streams[sid] + c.streamLock.RUnlock() + return s } -func (c *Client) recv(resp *Response, msg *message) error { - if msg.err != nil { - return msg.err - } +func (c *Client) cleanupStreams(err error) { + c.streamLock.Lock() + defer c.streamLock.Unlock() - if msg.Type != messageTypeResponse { - return errors.New("unknown message type received") + for sid, s := range c.streams { + s.closeWithError(err) + delete(c.streams, sid) } - - defer c.channel.putmbuf(msg.p) - return proto.Unmarshal(msg.p, resp) } // filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when @@ -388,6 +408,8 @@ func filterCloseErr(err error) error { return nil case err == io.EOF: return ErrClosed + case errors.Is(err, io.ErrClosedPipe): + return ErrClosed case errors.Is(err, io.EOF): return ErrClosed case strings.Contains(err.Error(), "use of closed network connection"): @@ -395,11 +417,9 @@ func filterCloseErr(err error) error { default: // if we have an epipe on a write or econnreset on a read , we cast to errclosed var oerr *net.OpError - if errors.As(err, &oerr) && (oerr.Op == "write" || oerr.Op == "read") { - serr, sok := oerr.Err.(*os.SyscallError) - if sok && ((serr.Err == syscall.EPIPE && oerr.Op == "write") || - (serr.Err == syscall.ECONNRESET && oerr.Op == "read")) { - + if errors.As(err, &oerr) { + if (oerr.Op == "write" && errors.Is(err, syscall.EPIPE)) || + (oerr.Op == "read" && errors.Is(err, syscall.ECONNRESET)) { return ErrClosed } } @@ -407,3 +427,86 @@ func filterCloseErr(err error) error { return err } + +// NewStream creates a new stream with the given stream descriptor to the +// specified service and method. If not a streaming client, the request object +// may be provided. 
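Note the rewritten filterCloseErr just above: instead of manually unwrapping *os.SyscallError, it relies on errors.As/errors.Is, so EPIPE on writes and ECONNRESET on reads are matched even through wrapped errors. A standalone illustration of that matching pattern (not part of the patch itself):

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"syscall"
)

// isClosedWrite mirrors the write/EPIPE arm of filterCloseErr above.
func isClosedWrite(err error) bool {
	var oerr *net.OpError
	return errors.As(err, &oerr) && oerr.Op == "write" && errors.Is(err, syscall.EPIPE)
}

func main() {
	err := &net.OpError{
		Op:  "write",
		Err: &os.SyscallError{Syscall: "write", Err: syscall.EPIPE},
	}
	fmt.Println(isClosedWrite(err))                         // true
	fmt.Println(isClosedWrite(fmt.Errorf("send: %w", err))) // still true after wrapping
}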
+func (c *Client) NewStream(ctx context.Context, desc *StreamDesc, service, method string, req interface{}) (ClientStream, error) { + var payload []byte + if req != nil { + var err error + payload, err = c.codec.Marshal(req) + if err != nil { + return nil, err + } + } + + request := &Request{ + Service: service, + Method: method, + Payload: payload, + // TODO: metadata from context + } + p, err := c.codec.Marshal(request) + if err != nil { + return nil, err + } + + var flags uint8 + if desc.StreamingClient { + flags = flagRemoteOpen + } else { + flags = flagRemoteClosed + } + s, err := c.createStream(flags, p) + if err != nil { + return nil, err + } + + return &clientStream{ + ctx: ctx, + s: s, + c: c, + desc: desc, + }, nil +} + +func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { + p, err := c.codec.Marshal(req) + if err != nil { + return err + } + + s, err := c.createStream(0, p) + if err != nil { + return err + } + defer c.deleteStream(s) + + var msg *streamMessage + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.ctx.Done(): + return ErrClosed + case <-s.recvClose: + // If recv has a pending message, process that first + select { + case msg = <-s.recv: + default: + return s.recvErr + } + case msg = <-s.recv: + } + + if msg.header.Type == messageTypeResponse { + err = proto.Unmarshal(msg.payload[:msg.header.Length], resp) + } else { + err = fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) + } + + // return the payload buffer for reuse + c.channel.putmbuf(msg.payload) + + return err +} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/codec.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/codec.go index 880634c27e39..3e82722a4245 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/codec.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/codec.go @@ -19,7 +19,7 @@ package ttrpc import ( "fmt" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" ) type codec struct{} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/doc.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/doc.go new file mode 100644 index 000000000000..d80cd424cc8b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/doc.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +package ttrpc defines and implements a low level simple transfer protocol +optimized for low latency and reliable connections between processes on the same +host. The protocol uses simple framing for sending requests, responses, and data +using multiple streams. 
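To make the framing described here concrete: every message is preceded by a fixed 10-byte header (Length b[:4], StreamID b[4:8], Type b[8], Flags b[9], per the messageHeader comment in channel.go earlier in this patch). A hand-rolled encoder sketch, assuming the header's big-endian layout and that 0x01 is the request message type:

package main

import (
	"encoding/binary"
	"fmt"
)

// putHeader encodes the 10-byte ttrpc frame header into p.
func putHeader(p []byte, length, streamID uint32, mtype byte, flags uint8) {
	binary.BigEndian.PutUint32(p[0:4], length)
	binary.BigEndian.PutUint32(p[4:8], streamID)
	p[8] = mtype // message type (0x01 assumed to mean "request")
	p[9] = flags // type-specific flags, e.g. flagRemoteClosed
}

func main() {
	var hdr [10]byte
	putHeader(hdr[:], 5, 1, 0x01, 0)
	fmt.Printf("% x\n", hdr[:]) // 00 00 00 05 00 00 00 01 01 00
}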
+*/ +package ttrpc diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/errors.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/errors.go new file mode 100644 index 000000000000..ec14b7952bfb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/errors.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import "errors" + +var ( + // ErrProtocol is a general error in the handling the protocol. + ErrProtocol = errors.New("protocol error") + + // ErrClosed is returned by client methods when the underlying connection is + // closed. + ErrClosed = errors.New("ttrpc: closed") + + // ErrServerClosed is returned when the Server has closed its connection. + ErrServerClosed = errors.New("ttrpc: server closed") + + // ErrStreamClosed is when the streaming connection is closed. + ErrStreamClosed = errors.New("ttrpc: stream closed") +) diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/handshake.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/handshake.go index a424b67a4976..3c6b610d35d4 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/handshake.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/handshake.go @@ -45,6 +45,6 @@ func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn return fn(ctx, conn) } -func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { +func noopHandshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { return conn, nil, nil } diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/interceptor.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/interceptor.go index c1219dac65f6..7ff5e9d33f23 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/interceptor.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/interceptor.go @@ -28,6 +28,13 @@ type UnaryClientInfo struct { FullMethod string } +// StreamServerInfo provides information about the server request +type StreamServerInfo struct { + FullMethod string + StreamingClient bool + StreamingServer bool +} + // Unmarshaler contains the server request data and allows it to be unmarshaled // into a concrete type type Unmarshaler func(interface{}) error @@ -41,10 +48,18 @@ type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, // UnaryClientInterceptor specifies the interceptor function for client request/response type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error -func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) { +func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, _ *UnaryServerInfo, method Method) (interface{}, error) { return method(ctx, unmarshal) } func defaultClientInterceptor(ctx 
context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { return invoker(ctx, req, resp) } + +type StreamServerInterceptor func(context.Context, StreamServer, *StreamServerInfo, StreamHandler) (interface{}, error) + +func defaultStreamServerInterceptor(ctx context.Context, ss StreamServer, _ *StreamServerInfo, stream StreamHandler) (interface{}, error) { + return stream(ctx, ss) +} + +type StreamClientInterceptor func(context.Context) diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/request.pb.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/request.pb.go new file mode 100644 index 000000000000..3921ae5a356c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/request.pb.go @@ -0,0 +1,396 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/ttrpc/request.proto + +package ttrpc + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,json=timeoutNano,proto3" json:"timeout_nano,omitempty"` + Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *Request) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *Request) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Request) GetTimeoutNano() int64 { + if x != nil { + return x.TimeoutNano + } + return 0 +} + +func (x *Request) GetMetadata() []*KeyValue { + if x != nil { + return x.Metadata + } + return nil +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{1} +} + +func (x *Response) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *Response) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type StringList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + List []string `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (x *StringList) Reset() { + *x = StringList{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringList) ProtoMessage() {} + +func (x *StringList) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringList.ProtoReflect.Descriptor instead. 
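These generated accessors back the Request/Response types that previously lived as hand-written gogo/protobuf structs in types.go (deleted further below); both now marshal through google.golang.org/protobuf. A quick round-trip sketch using the exported types (canonical import path shown, field names hypothetical where noted):

package ttrpcexample

import (
	"time"

	"github.com/containerd/ttrpc"
	"google.golang.org/protobuf/proto"
)

// roundTrip marshals and unmarshals a ttrpc.Request with the protobuf
// runtime the package now vendors.
func roundTrip() (*ttrpc.Request, error) {
	in := &ttrpc.Request{
		Service:     "example.Service", // invented service/method names
		Method:      "Get",
		TimeoutNano: int64(time.Second),
	}
	b, err := proto.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := &ttrpc.Request{}
	if err := proto.Unmarshal(b, out); err != nil {
		return nil, err
	}
	return out, nil
}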
+func (*StringList) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{2} +} + +func (x *StringList) GetList() []string { + if x != nil { + return x.List + } + return nil +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_github_com_containerd_ttrpc_request_proto protoreflect.FileDescriptor + +var file_github_com_containerd_ttrpc_request_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x74, 0x72, + 0x70, 0x63, 0x1a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x45, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x20, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x1d, 0x5a, 0x1b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_github_com_containerd_ttrpc_request_proto_rawDescOnce sync.Once + file_github_com_containerd_ttrpc_request_proto_rawDescData = file_github_com_containerd_ttrpc_request_proto_rawDesc +) + +func file_github_com_containerd_ttrpc_request_proto_rawDescGZIP() []byte { + file_github_com_containerd_ttrpc_request_proto_rawDescOnce.Do(func() { + file_github_com_containerd_ttrpc_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_ttrpc_request_proto_rawDescData) + }) + return file_github_com_containerd_ttrpc_request_proto_rawDescData +} + +var file_github_com_containerd_ttrpc_request_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_ttrpc_request_proto_goTypes = []interface{}{ + (*Request)(nil), // 0: ttrpc.Request + (*Response)(nil), // 1: ttrpc.Response + (*StringList)(nil), // 2: ttrpc.StringList + (*KeyValue)(nil), // 3: ttrpc.KeyValue + (*status.Status)(nil), // 4: Status +} +var file_github_com_containerd_ttrpc_request_proto_depIdxs = []int32{ + 3, // 0: ttrpc.Request.metadata:type_name -> ttrpc.KeyValue + 4, // 1: ttrpc.Response.status:type_name -> Status + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_ttrpc_request_proto_init() } +func file_github_com_containerd_ttrpc_request_proto_init() { + if File_github_com_containerd_ttrpc_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_ttrpc_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_ttrpc_request_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_ttrpc_request_proto_goTypes, + DependencyIndexes: file_github_com_containerd_ttrpc_request_proto_depIdxs, + MessageInfos: file_github_com_containerd_ttrpc_request_proto_msgTypes, + }.Build() + File_github_com_containerd_ttrpc_request_proto = out.File + file_github_com_containerd_ttrpc_request_proto_rawDesc = nil + file_github_com_containerd_ttrpc_request_proto_goTypes = nil + file_github_com_containerd_ttrpc_request_proto_depIdxs = nil +} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/request.proto b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/request.proto new file mode 100644 index 000000000000..37da334fc2a9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/request.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package ttrpc; + +import "proto/status.proto"; + +option go_package = "github.com/containerd/ttrpc"; + +message Request { + string service = 1; + string method = 2; + bytes payload = 3; + int64 timeout_nano = 4; + repeated KeyValue metadata = 5; +} + +message Response { + Status status = 1; + bytes payload = 2; +} + +message StringList { + repeated string list = 1; +} + +message KeyValue { + string key = 1; + string value = 2; +} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/server.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/server.go index b0e48073e4d9..7af59f828e52 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/server.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/server.go @@ -24,6 +24,7 @@ import ( "net" "sync" "sync/atomic" + "syscall" "time" "github.com/sirupsen/logrus" @@ -31,10 +32,6 @@ import ( "google.golang.org/grpc/status" ) -var ( - ErrServerClosed = errors.New("ttrpc: server closed") -) - type Server struct { config *serverConfig services *serviceSet @@ -66,8 +63,14 @@ func NewServer(opts ...ServerOpt) (*Server, error) { }, nil } +// Register registers a map of methods to method handlers +// TODO: Remove in 2.0, does not support streams func (s *Server) Register(name string, methods map[string]Method) { - s.services.register(name, methods) + s.services.register(name, &ServiceDesc{Methods: methods}) +} + +func (s *Server) RegisterService(name string, desc *ServiceDesc) { + s.services.register(name, desc) } func (s *Server) Serve(ctx context.Context, l net.Listener) error { @@ -118,12 +121,18 @@ func (s *Server) Serve(ctx context.Context, l net.Listener) error { approved, handshake, err := handshaker.Handshake(ctx, conn) if err != nil { - logrus.WithError(err).Errorf("ttrpc: refusing connection after handshake") + logrus.WithError(err).Error("ttrpc: refusing connection after handshake") + conn.Close() + continue + } + + sc, err := s.newConn(approved, handshake) + if err != nil { + logrus.WithError(err).Error("ttrpc: create connection failed") conn.Close() continue } - sc := s.newConn(approved, handshake) go sc.run(ctx) } } @@ -142,15 +151,20 @@ func (s *Server) Shutdown(ctx context.Context) error { ticker := time.NewTicker(200 * time.Millisecond) defer ticker.Stop() for { - if s.closeIdleConns() { - return 
lnerr + s.closeIdleConns() + + if s.countConnection() == 0 { + break } + select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } + + return lnerr } // Close the server without waiting for active connections. @@ -202,11 +216,18 @@ func (s *Server) closeListeners() error { return err } -func (s *Server) addConnection(c *serverConn) { +func (s *Server) addConnection(c *serverConn) error { s.mu.Lock() defer s.mu.Unlock() + select { + case <-s.done: + return ErrServerClosed + default: + } + s.connections[c] = struct{}{} + return nil } func (s *Server) delConnection(c *serverConn) { @@ -223,20 +244,17 @@ func (s *Server) countConnection() int { return len(s.connections) } -func (s *Server) closeIdleConns() bool { +func (s *Server) closeIdleConns() { s.mu.Lock() defer s.mu.Unlock() - quiescent := true + for c := range s.connections { - st, ok := c.getState() - if !ok || st != connStateIdle { - quiescent = false + if st, ok := c.getState(); !ok || st == connStateActive { continue } c.close() delete(s.connections, c) } - return quiescent } type connState int @@ -260,7 +278,7 @@ func (cs connState) String() string { } } -func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn { +func (s *Server) newConn(conn net.Conn, handshake interface{}) (*serverConn, error) { c := &serverConn{ server: s, conn: conn, @@ -268,8 +286,11 @@ func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn { shutdown: make(chan struct{}), } c.setState(connStateIdle) - s.addConnection(c) - return c + if err := s.addConnection(c); err != nil { + c.close() + return nil, err + } + return c, nil } type serverConn struct { @@ -301,27 +322,25 @@ func (c *serverConn) close() error { func (c *serverConn) run(sctx context.Context) { type ( - request struct { - id uint32 - req *Request - } - response struct { - id uint32 - resp *Response + id uint32 + status *status.Status + data []byte + closeStream bool + streaming bool } ) var ( - ch = newChannel(c.conn) - ctx, cancel = context.WithCancel(sctx) - active int - state connState = connStateIdle - responses = make(chan response) - requests = make(chan request) - recvErr = make(chan error, 1) - shutdown = c.shutdown - done = make(chan struct{}) + ch = newChannel(c.conn) + ctx, cancel = context.WithCancel(sctx) + state connState = connStateIdle + responses = make(chan response) + recvErr = make(chan error, 1) + done = make(chan struct{}) + streams = sync.Map{} + active int32 + lastStreamID uint32 ) defer c.conn.Close() @@ -329,27 +348,26 @@ func (c *serverConn) run(sctx context.Context) { defer close(done) defer c.server.delConnection(c) - go func(recvErr chan error) { - defer close(recvErr) - sendImmediate := func(id uint32, st *status.Status) bool { - select { - case responses <- response{ - // even though we've had an invalid stream id, we send it - // back on the same stream id so the client knows which - // stream id was bad. - id: id, - resp: &Response{ - Status: st.Proto(), - }, - }: - return true - case <-c.shutdown: - return false - case <-done: - return false - } + sendStatus := func(id uint32, st *status.Status) bool { + select { + case responses <- response{ + // even though we've had an invalid stream id, we send it + // back on the same stream id so the client knows which + // stream id was bad. 
+ id: id, + status: st, + closeStream: true, + }: + return true + case <-c.shutdown: + return false + case <-done: + return false } + } + go func(recvErr chan error) { + defer close(recvErr) for { select { case <-c.shutdown: @@ -369,112 +387,173 @@ func (c *serverConn) run(sctx context.Context) { // in this case, we send an error for that particular message // when the status is defined. - if !sendImmediate(mh.StreamID, status) { + if !sendStatus(mh.StreamID, status) { return } continue } - if mh.Type != messageTypeRequest { - // we must ignore this for future compat. - continue - } - - var req Request - if err := c.server.codec.Unmarshal(p, &req); err != nil { - ch.putmbuf(p) - if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { - return - } - continue - } - ch.putmbuf(p) - if mh.StreamID%2 != 1 { // enforce odd client initiated identifiers. - if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { return } continue } - // Forward the request to the main loop. We don't wait on s.done - // because we have already accepted the client request. - select { - case requests <- request{ - id: mh.StreamID, - req: &req, - }: - case <-done: - return + if mh.Type == messageTypeData { + i, ok := streams.Load(mh.StreamID) + if !ok { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID is no longer active")) { + return + } + } + sh := i.(*streamHandler) + if mh.Flags&flagNoData != flagNoData { + unmarshal := func(obj interface{}) error { + err := protoUnmarshal(p, obj) + ch.putmbuf(p) + return err + } + + if err := sh.data(unmarshal); err != nil { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data handling error: %v", err)) { + return + } + } + } + + if mh.Flags&flagRemoteClosed == flagRemoteClosed { + sh.closeSend() + if len(p) > 0 { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data close message cannot include data")) { + return + } + } + } + } else if mh.Type == messageTypeRequest { + if mh.StreamID <= lastStreamID { + // enforce odd client initiated identifiers. + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID cannot be re-used and must increment")) { + return + } + continue + + } + lastStreamID = mh.StreamID + + // TODO: Make request type configurable + // Unmarshaller which takes in a byte array and returns an interface? + var req Request + if err := c.server.codec.Unmarshal(p, &req); err != nil { + ch.putmbuf(p) + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { + return + } + continue + } + ch.putmbuf(p) + + id := mh.StreamID + respond := func(status *status.Status, data []byte, streaming, closeStream bool) error { + select { + case responses <- response{ + id: id, + status: status, + data: data, + closeStream: closeStream, + streaming: streaming, + }: + case <-done: + return ErrClosed + } + return nil + } + sh, err := c.server.services.handle(ctx, &req, respond) + if err != nil { + status, _ := status.FromError(err) + if !sendStatus(mh.StreamID, status) { + return + } + continue + } + + streams.Store(id, sh) + atomic.AddInt32(&active, 1) } + // TODO: else we must ignore this for future compat. log this? 
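The data-frame routing above looks streams up by ID and hands their payloads to a streamHandler; those handlers come from the Streams map added to ServiceDesc in services.go later in this patch, registered through the new RegisterService. A hedged sketch of registering a server-streaming method; the service name and the use of StringList as a payload are invented:

package ttrpcexample

import (
	"context"
	"strconv"

	"github.com/containerd/ttrpc"
)

// register wires up a hypothetical server-streaming method.
func register(srv *ttrpc.Server) {
	srv.RegisterService("example.Streamer", &ttrpc.ServiceDesc{
		Streams: map[string]ttrpc.Stream{
			"Subscribe": {
				StreamingServer: true, // server pushes multiple data frames
				Handler: func(ctx context.Context, ss ttrpc.StreamServer) (interface{}, error) {
					for i := 0; i < 3; i++ {
						if err := ss.SendMsg(&ttrpc.StringList{List: []string{strconv.Itoa(i)}}); err != nil {
							return nil, err
						}
					}
					// The returned message becomes the closing response frame.
					return &ttrpc.StringList{}, nil
				},
			},
		},
	})
}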
} }(recvErr) for { - newstate := state - switch { - case active > 0: + var ( + newstate connState + shutdown chan struct{} + ) + + activeN := atomic.LoadInt32(&active) + if activeN > 0 { newstate = connStateActive shutdown = nil - case active == 0: + } else { newstate = connStateIdle shutdown = c.shutdown // only enable this branch in idle mode } - if newstate != state { c.setState(newstate) state = newstate } select { - case request := <-requests: - active++ - go func(id uint32) { - ctx, cancel := getRequestContext(ctx, request.req) - defer cancel() - - p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload) - resp := &Response{ - Status: status.Proto(), - Payload: p, + case response := <-responses: + if !response.streaming || response.status.Code() != codes.OK { + p, err := c.server.codec.Marshal(&Response{ + Status: response.status.Proto(), + Payload: response.data, + }) + if err != nil { + logrus.WithError(err).Error("failed marshaling response") + return } - select { - case responses <- response{ - id: id, - resp: resp, - }: - case <-done: + if err := ch.send(response.id, messageTypeResponse, 0, p); err != nil { + logrus.WithError(err).Error("failed sending message on channel") + return + } + } else { + var flags uint8 + if response.closeStream { + flags = flagRemoteClosed + } + if response.data == nil { + flags = flags | flagNoData + } + if err := ch.send(response.id, messageTypeData, flags, response.data); err != nil { + logrus.WithError(err).Error("failed sending message on channel") + return } - }(request.id) - case response := <-responses: - p, err := c.server.codec.Marshal(response.resp) - if err != nil { - logrus.WithError(err).Error("failed marshaling response") - return } - if err := ch.send(response.id, messageTypeResponse, p); err != nil { - logrus.WithError(err).Error("failed sending message on channel") - return + if response.closeStream { + // The ttrpc protocol currently does not support the case where + // the server is localClosed but not remoteClosed. Once the server + // is closing, the whole stream may be considered finished + streams.Delete(response.id) + atomic.AddInt32(&active, -1) } - - active-- case err := <-recvErr: // TODO(stevvooe): Not wildly clear what we should do in this // branch. Basically, it means that we are no longer receiving // requests due to a terminal error. 
recvErr = nil // connection is now "closing" - if err == io.EOF || err == io.ErrUnexpectedEOF { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET) { // The client went away and we should stop processing // requests, so that the client connection is closed return } - if err != nil { - logrus.WithError(err).Error("error receiving message") - } + logrus.WithError(err).Error("error receiving message") + // else, initiate shutdown case <-shutdown: return } diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/services.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/services.go index f359e9611f9e..6aabfbb4d18d 100644 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/services.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/services.go @@ -25,43 +25,62 @@ import ( "path" "unsafe" - "github.com/gogo/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) +type StreamHandler func(context.Context, StreamServer) (interface{}, error) + +type Stream struct { + Handler StreamHandler + StreamingClient bool + StreamingServer bool +} + type ServiceDesc struct { Methods map[string]Method - - // TODO(stevvooe): Add stream support. + Streams map[string]Stream } type serviceSet struct { - services map[string]ServiceDesc - interceptor UnaryServerInterceptor + services map[string]*ServiceDesc + unaryInterceptor UnaryServerInterceptor + streamInterceptor StreamServerInterceptor } func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { return &serviceSet{ - services: make(map[string]ServiceDesc), - interceptor: interceptor, + services: make(map[string]*ServiceDesc), + unaryInterceptor: interceptor, + streamInterceptor: defaultStreamServerInterceptor, } } -func (s *serviceSet) register(name string, methods map[string]Method) { +func (s *serviceSet) register(name string, desc *ServiceDesc) { if _, ok := s.services[name]; ok { panic(fmt.Errorf("duplicate service %v registered", name)) } - s.services[name] = ServiceDesc{ - Methods: methods, - } + s.services[name] = desc } -func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) { - p, err := s.dispatch(ctx, serviceName, methodName, p) +func (s *serviceSet) unaryCall(ctx context.Context, method Method, info *UnaryServerInfo, data []byte) (p []byte, st *status.Status) { + unmarshal := func(obj interface{}) error { + return protoUnmarshal(data, obj) + } + + resp, err := s.unaryInterceptor(ctx, unmarshal, info, method) + if err == nil { + if isNil(resp) { + err = errors.New("ttrpc: marshal called with nil") + } else { + p, err = protoMarshal(resp) + } + } + st, ok := status.FromError(err) if !ok { st = status.New(convertCode(err), err.Error()) @@ -70,38 +89,142 @@ func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p return p, st } -func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) { - method, err := s.resolve(serviceName, methodName) - if err != nil { - return nil, err +func (s *serviceSet) streamCall(ctx context.Context, stream StreamHandler, info *StreamServerInfo, ss StreamServer) (p []byte, st *status.Status) { + resp, err := s.streamInterceptor(ctx, ss, info, stream) + if err == nil { + p, err = protoMarshal(resp) + } + 
st, ok := status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) } + return +} - unmarshal := func(obj interface{}) error { - switch v := obj.(type) { - case proto.Message: - if err := proto.Unmarshal(p, v); err != nil { - return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) +func (s *serviceSet) handle(ctx context.Context, req *Request, respond func(*status.Status, []byte, bool, bool) error) (*streamHandler, error) { + srv, ok := s.services[req.Service] + if !ok { + return nil, status.Errorf(codes.Unimplemented, "service %v", req.Service) + } + + if method, ok := srv.Methods[req.Method]; ok { + go func() { + ctx, cancel := getRequestContext(ctx, req) + defer cancel() + + info := &UnaryServerInfo{ + FullMethod: fullPath(req.Service, req.Method), } - default: - return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + p, st := s.unaryCall(ctx, method, info, req.Payload) + + respond(st, p, false, true) + }() + return nil, nil + } + if stream, ok := srv.Streams[req.Method]; ok { + ctx, cancel := getRequestContext(ctx, req) + info := &StreamServerInfo{ + FullMethod: fullPath(req.Service, req.Method), + StreamingClient: stream.StreamingClient, + StreamingServer: stream.StreamingServer, } - return nil + sh := &streamHandler{ + ctx: ctx, + respond: respond, + recv: make(chan Unmarshaler, 5), + info: info, + } + go func() { + defer cancel() + p, st := s.streamCall(ctx, stream.Handler, info, sh) + respond(st, p, stream.StreamingServer, true) + }() + + if req.Payload != nil { + unmarshal := func(obj interface{}) error { + return protoUnmarshal(req.Payload, obj) + } + if err := sh.data(unmarshal); err != nil { + return nil, err + } + } + + return sh, nil } + return nil, status.Errorf(codes.Unimplemented, "method %v", req.Method) +} + +type streamHandler struct { + ctx context.Context + respond func(*status.Status, []byte, bool, bool) error + recv chan Unmarshaler + info *StreamServerInfo + + remoteClosed bool + localClosed bool +} - info := &UnaryServerInfo{ - FullMethod: fullPath(serviceName, methodName), +func (s *streamHandler) closeSend() { + if !s.remoteClosed { + s.remoteClosed = true + close(s.recv) + } +} + +func (s *streamHandler) data(unmarshal Unmarshaler) error { + if s.remoteClosed { + return ErrStreamClosed + } + select { + case s.recv <- unmarshal: + return nil + case <-s.ctx.Done(): + return s.ctx.Err() } +} - resp, err := s.interceptor(ctx, unmarshal, info, method) +func (s *streamHandler) SendMsg(m interface{}) error { + if s.localClosed { + return ErrStreamClosed + } + p, err := protoMarshal(m) if err != nil { - return nil, err + return err } + return s.respond(nil, p, true, false) +} + +func (s *streamHandler) RecvMsg(m interface{}) error { + select { + case unmarshal, ok := <-s.recv: + if !ok { + return io.EOF + } + return unmarshal(m) + case <-s.ctx.Done(): + return s.ctx.Err() - if isNil(resp) { - return nil, errors.New("ttrpc: marshal called with nil") } +} - switch v := resp.(type) { +func protoUnmarshal(p []byte, obj interface{}) error { + switch v := obj.(type) { + case proto.Message: + if err := proto.Unmarshal(p, v); err != nil { + return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) + } + default: + return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + } + return nil +} + +func protoMarshal(obj interface{}) ([]byte, error) { + if obj == nil { + return nil, nil + } + + switch v := obj.(type) { case 
proto.Message: r, err := proto.Marshal(v) if err != nil { @@ -114,20 +237,6 @@ func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName strin } } -func (s *serviceSet) resolve(service, method string) (Method, error) { - srv, ok := s.services[service] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "service %v", service) - } - - mthd, ok := srv.Methods[method] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "method %v", method) - } - - return mthd, nil -} - // convertCode maps stdlib go errors into grpc space. // // This is ripped from the grpc-go code base. diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/stream.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/stream.go new file mode 100644 index 000000000000..739a4c9675b7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/stream.go @@ -0,0 +1,84 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "sync" +) + +type streamID uint32 + +type streamMessage struct { + header messageHeader + payload []byte +} + +type stream struct { + id streamID + sender sender + recv chan *streamMessage + + closeOnce sync.Once + recvErr error + recvClose chan struct{} +} + +func newStream(id streamID, send sender) *stream { + return &stream{ + id: id, + sender: send, + recv: make(chan *streamMessage, 1), + recvClose: make(chan struct{}), + } +} + +func (s *stream) closeWithError(err error) error { + s.closeOnce.Do(func() { + if err != nil { + s.recvErr = err + } else { + s.recvErr = ErrClosed + } + close(s.recvClose) + }) + return nil +} + +func (s *stream) send(mt messageType, flags uint8, b []byte) error { + return s.sender.send(uint32(s.id), mt, flags, b) +} + +func (s *stream) receive(ctx context.Context, msg *streamMessage) error { + select { + case <-s.recvClose: + return s.recvErr + default: + } + select { + case <-s.recvClose: + return s.recvErr + case s.recv <- msg: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +type sender interface { + send(uint32, messageType, uint8, []byte) error +} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/stream_server.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/stream_server.go new file mode 100644 index 000000000000..b6d1ba720a43 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/stream_server.go @@ -0,0 +1,22 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +type StreamServer interface { + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/test.proto b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/test.proto new file mode 100644 index 000000000000..0e114d556886 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/test.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package ttrpc; + +option go_package = "github.com/containerd/ttrpc/internal"; + +message TestPayload { + string foo = 1; + int64 deadline = 2; + string metadata = 3; +} + +message EchoPayload { + int64 seq = 1; + string msg = 2; +} diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/types.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/types.go deleted file mode 100644 index 9a1c19a7238d..000000000000 --- a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "fmt" - - spb "google.golang.org/genproto/googleapis/rpc/status" -) - -type Request struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3"` - Method string `protobuf:"bytes,2,opt,name=method,proto3"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` - TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` - Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"` -} - -func (r *Request) Reset() { *r = Request{} } -func (r *Request) String() string { return fmt.Sprintf("%+#v", r) } -func (r *Request) ProtoMessage() {} - -type Response struct { - Status *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"` - Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3"` -} - -func (r *Response) Reset() { *r = Response{} } -func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } -func (r *Response) ProtoMessage() {} - -type StringList struct { - List []string `protobuf:"bytes,1,rep,name=list,proto3"` -} - -func (r *StringList) Reset() { *r = StringList{} } -func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) } -func (r *StringList) ProtoMessage() {} - -func makeStringList(item ...string) StringList { return StringList{List: item} } - -type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3"` - Value string `protobuf:"bytes,2,opt,name=value,proto3"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (*KeyValue) ProtoMessage() {} -func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) } diff --git a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/unixcreds_linux.go b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/unixcreds_linux.go index a59dad60cd55..c82c9f9d4c74 100644 --- 
a/cluster-autoscaler/vendor/github.com/containerd/ttrpc/unixcreds_linux.go +++ b/cluster-autoscaler/vendor/github.com/containerd/ttrpc/unixcreds_linux.go @@ -29,7 +29,7 @@ import ( type UnixCredentialsFunc func(*unix.Ucred) error -func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { +func (fn UnixCredentialsFunc) Handshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { uc, err := requireUnixSocket(conn) if err != nil { return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: require unix socket: %w", err) @@ -50,7 +50,7 @@ func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net } if ucredErr != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", err) + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", ucredErr) } if err := fn(ucred); err != nil { @@ -88,10 +88,6 @@ func UnixSocketRequireSameUser() UnixCredentialsFunc { return UnixSocketRequireUidGid(euid, egid) } -func requireRoot(ucred *unix.Ucred) error { - return requireUidGid(ucred, 0, 0) -} - func requireUidGid(ucred *unix.Ucred, uid, gid int) error { if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { return fmt.Errorf("ttrpc: invalid credentials: %v", syscall.EPERM) diff --git a/cluster-autoscaler/vendor/github.com/docker/distribution/reference/reference.go b/cluster-autoscaler/vendor/github.com/docker/distribution/reference/reference.go index 8c0c23b2fe1b..b7cd00b0d68e 100644 --- a/cluster-autoscaler/vendor/github.com/docker/distribution/reference/reference.go +++ b/cluster-autoscaler/vendor/github.com/docker/distribution/reference/reference.go @@ -3,13 +3,13 @@ // // Grammar // -// reference := name [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' 
domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.gitignore b/cluster-autoscaler/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e00a5..000000000000 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml b/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc010a..000000000000 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md b/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4d12..000000000000 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. 
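As an aside, the two caveats above are easy to demonstrate against the `ghodss/yaml` API being removed here. A minimal sketch, assuming the vendored import path and inputs invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// Caveat #1: store binary as plain base64, without the !!binary tag,
	// and it survives the YAML -> JSON round trip as an ordinary string.
	j, err := yaml.YAMLToJSON([]byte("exampleKey: gIGC"))
	fmt.Println(string(j), err) // {"exampleKey":"gIGC"} <nil>

	// Caveat #2: a map used as a map key is legal YAML but has no JSON
	// equivalent, so YAMLToJSON returns an error rather than guessing.
	_, err = yaml.YAMLToJSON([]byte("{a: 1}: value"))
	fmt.Println(err != nil) // true
}
```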
- -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. - var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 58600740266c..000000000000 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. 
-type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. 
- nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
- kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. 
-func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054a8b74..000000000000 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). 
So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. 
- jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/.travis.yml b/cluster-autoscaler/vendor/github.com/gofrs/uuid/.travis.yml deleted file mode 100644 index 0783aaa9c4cf..000000000000 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: go -sudo: false -go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -before_install: - - go get golang.org/x/tools/cmd/cover -script: - - go test ./... 
-race -coverprofile=coverage.txt -covermode=atomic -after_success: - - bash <(curl -s https://codecov.io/bash) -notifications: - email: false diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/README.md b/cluster-autoscaler/vendor/github.com/gofrs/uuid/README.md index 2685a832e384..4f73bec82c09 100644 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/README.md +++ b/cluster-autoscaler/vendor/github.com/gofrs/uuid/README.md @@ -16,6 +16,14 @@ This package supports the following UUID versions: * Version 4, based on random numbers (RFC-4122) * Version 5, based on SHA-1 hashing of a named value (RFC-4122) +This package also supports experimental Universally Unique Identifier implementations based on a +[draft RFC](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html) that updates RFC-4122 +* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122) +* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122) + +The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases +to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted. + ## Project History This project was originally forked from the @@ -106,3 +114,4 @@ func main() { * [RFC-4122](https://tools.ietf.org/html/rfc4122) * [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) +* [New UUID Formats RFC Draft (Peabody) Rev 04](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#) diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/codec.go b/cluster-autoscaler/vendor/github.com/gofrs/uuid/codec.go index e3014c68c663..665026414c30 100644 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/codec.go +++ b/cluster-autoscaler/vendor/github.com/gofrs/uuid/codec.go @@ -22,8 +22,7 @@ package uuid import ( - "bytes" - "encoding/hex" + "errors" "fmt" ) @@ -45,11 +44,77 @@ func FromBytesOrNil(input []byte) UUID { return uuid } +var errInvalidFormat = errors.New("uuid: invalid UUID format") + +func fromHexChar(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 255 +} + +// Parse parses the UUID stored in the string text. Parsing and supported +// formats are the same as UnmarshalText. 
+func (u *UUID) Parse(s string) error { + switch len(s) { + case 32: // hash + case 36: // canonical + case 34, 38: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", s) + } + s = s[1 : len(s)-1] + case 41, 45: + if s[:9] != "urn:uuid:" { + return fmt.Errorf("uuid: incorrect UUID format in string %q", s[:9]) + } + s = s[9:] + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(s), s) + } + // canonical + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", s) + } + for i, x := range [16]byte{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v1 := fromHexChar(s[x]) + v2 := fromHexChar(s[x+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i] = (v1 << 4) | v2 + } + return nil + } + // hash like + for i := 0; i < 32; i += 2 { + v1 := fromHexChar(s[i]) + v2 := fromHexChar(s[i+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i/2] = (v1 << 4) | v2 + } + return nil +} + // FromString returns a UUID parsed from the input string. // Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (UUID, error) { - u := UUID{} - err := u.UnmarshalText([]byte(input)) +func FromString(text string) (UUID, error) { + var u UUID + err := u.Parse(text) return u, err } @@ -66,133 +131,90 @@ func FromStringOrNil(input string) UUID { // MarshalText implements the encoding.TextMarshaler interface. // The encoding is the same as returned by the String() method. func (u UUID) MarshalText() ([]byte, error) { - return []byte(u.String()), nil + var buf [36]byte + encodeCanonical(buf[:], u) + return buf[:], nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. 
// Following formats are supported: // -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// "{6ba7b8109dad11d180b400c04fd430c8}", -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" // // ABNF for supported UUID text representation follows: // -// URN := 'urn' -// UUID-NID := 'uuid' -// -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// URN := 'urn' +// UUID-NID := 'uuid' // -// hexoct := hexdig hexdig -// 2hexoct := hexoct hexoct -// 4hexoct := 2hexoct 2hexoct -// 6hexoct := 4hexoct 2hexoct -// 12hexoct := 6hexoct 6hexoct +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' // -// hashlike := 12hexoct -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct // -// plain := canonical | hashlike -// uuid := canonical | hashlike | braced | urn +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct // -// braced := '{' plain '}' | '{' hashlike '}' -// urn := URN ':' UUID-NID ':' plain +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn // -func (u *UUID) UnmarshalText(text []byte) error { - switch len(text) { - case 32: - return u.decodeHashLike(text) +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +func (u *UUID) UnmarshalText(b []byte) error { + switch len(b) { + case 32: // hash + case 36: // canonical case 34, 38: - return u.decodeBraced(text) - case 36: - return u.decodeCanonical(text) + if b[0] != '{' || b[len(b)-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", b) + } + b = b[1 : len(b)-1] case 41, 45: - return u.decodeURN(text) + if string(b[:9]) != "urn:uuid:" { + return fmt.Errorf("uuid: incorrect UUID format in string %q", b[:9]) + } + b = b[9:] default: - return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) - } -} - -// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
-func (u *UUID) decodeCanonical(t []byte) error { - if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(b), b) } - - src := t - dst := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - src = src[1:] // skip dash + if len(b) == 36 { + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", b) } - _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) - if err != nil { - return err + for i, x := range [16]byte{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v1 := fromHexChar(b[x]) + v2 := fromHexChar(b[x+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i] = (v1 << 4) | v2 } - src = src[byteGroup:] - dst = dst[byteGroup/2:] - } - - return nil -} - -// decodeHashLike decodes UUID strings that are using the following format: -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeHashLike(t []byte) error { - src := t[:] - dst := u[:] - - _, err := hex.Decode(dst, src) - return err -} - -// decodeBraced decodes UUID strings that are using the following formats: -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" -// "{6ba7b8109dad11d180b400c04fd430c8}". -func (u *UUID) decodeBraced(t []byte) error { - l := len(t) - - if t[0] != '{' || t[l-1] != '}' { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + return nil } - - return u.decodePlain(t[1 : l-1]) -} - -// decodeURN decodes UUID strings that are using the following formats: -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeURN(t []byte) error { - total := len(t) - - urnUUIDPrefix := t[:9] - - if !bytes.Equal(urnUUIDPrefix, urnPrefix) { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) - } - - return u.decodePlain(t[9:total]) -} - -// decodePlain decodes UUID strings that are using the following formats: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodePlain(t []byte) error { - switch len(t) { - case 32: - return u.decodeHashLike(t) - case 36: - return u.decodeCanonical(t) - default: - return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + for i := 0; i < 32; i += 2 { + v1 := fromHexChar(b[i]) + v2 := fromHexChar(b[i+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i/2] = (v1 << 4) | v2 } + return nil } // MarshalBinary implements the encoding.BinaryMarshaler interface. diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/fuzz.go b/cluster-autoscaler/vendor/github.com/gofrs/uuid/fuzz.go index afaefbc8e399..ccf8d4ca2960 100644 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/fuzz.go +++ b/cluster-autoscaler/vendor/github.com/gofrs/uuid/fuzz.go @@ -19,6 +19,7 @@ // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +//go:build gofuzz // +build gofuzz package uuid @@ -27,15 +28,15 @@ package uuid // // To run: // -// $ go get github.com/dvyukov/go-fuzz/... -// $ cd $GOPATH/src/github.com/gofrs/uuid -// $ go-fuzz-build github.com/gofrs/uuid -// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// $ go get github.com/dvyukov/go-fuzz/... 
+// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata // // If you make significant changes to FromString / UnmarshalText and add // new cases to fromStringTests (in codec_test.go), please run // -// $ go test -seed_fuzz_corpus +// $ go test -seed_fuzz_corpus // // to seed the corpus with the new interesting inputs, then run the fuzzer. func Fuzz(data []byte) int { diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/generator.go b/cluster-autoscaler/vendor/github.com/gofrs/uuid/generator.go index 2783d9e7787f..44be9e15854e 100644 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/generator.go +++ b/cluster-autoscaler/vendor/github.com/gofrs/uuid/generator.go @@ -38,7 +38,8 @@ import ( // UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). const epochStart = 122192928000000000 -type epochFunc func() time.Time +// EpochFunc is the function type used to provide the current time. +type EpochFunc func() time.Time // HWAddrFunc is the function type used to provide hardware (MAC) addresses. type HWAddrFunc func() (net.HardwareAddr, error) @@ -66,12 +67,39 @@ func NewV5(ns UUID, name string) UUID { return DefaultGenerator.NewV5(ns, name) } +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV6() (UUID, error) { + return DefaultGenerator.NewV6() +} + +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. It supports single-node batch generation (multiple UUIDs in the same timestamp) with a Monotonic Random counter. +// +// This is implemented based on revision 04 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV7() (UUID, error) { + return DefaultGenerator.NewV7() +} + // Generator provides an interface for generating UUIDs. type Generator interface { NewV1() (UUID, error) NewV3(ns UUID, name string) UUID NewV4() (UUID, error) NewV5(ns UUID, name string) UUID + NewV6() (UUID, error) + NewV7() (UUID, error) } // Gen is a reference UUID generator based on the specifications laid out in @@ -92,13 +120,16 @@ type Gen struct { rand io.Reader - epochFunc epochFunc + epochFunc EpochFunc hwAddrFunc HWAddrFunc lastTime uint64 clockSequence uint16 hardwareAddr [6]byte } +// GenOption is a function type that can be used to configure a Gen generator. +type GenOption func(*Gen) + // interface check -- build will fail if *Gen doesn't satisfy Generator var _ Generator = (*Gen)(nil) @@ -120,18 +151,82 @@ func NewGen() *Gen { // MAC address being used, you'll need to create a new generator using this // function. 
func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { - return &Gen{ + return NewGenWithOptions(WithHWAddrFunc(hwaf)) +} + +// NewGenWithOptions returns a new instance of Gen with the options provided. +// Most people should use NewGen() or NewGenWithHWAF() instead. +// +// To customize the generator, you can pass in one or more GenOption functions. +// For example: +// +// gen := NewGenWithOptions( +// WithHWAddrFunc(myHWAddrFunc), +// WithEpochFunc(myEpochFunc), +// WithRandomReader(myRandomReader), +// ) +// +// NewGenWithOptions(WithHWAddrFunc(myHWAddrFunc)) is equivalent to calling +// NewGenWithHWAF(myHWAddrFunc) +// NewGenWithOptions() is equivalent to calling NewGen() +func NewGenWithOptions(opts ...GenOption) *Gen { + gen := &Gen{ epochFunc: time.Now, - hwAddrFunc: hwaf, + hwAddrFunc: defaultHWAddrFunc, rand: rand.Reader, } + + for _, opt := range opts { + opt(gen) + } + + return gen +} + +// WithHWAddrFunc is a GenOption that allows you to provide your own HWAddrFunc +// function. +// When this option is nil, the defaultHWAddrFunc is used. +func WithHWAddrFunc(hwaf HWAddrFunc) GenOption { + return func(gen *Gen) { + if hwaf == nil { + hwaf = defaultHWAddrFunc + } + + gen.hwAddrFunc = hwaf + } +} + +// WithEpochFunc is a GenOption that allows you to provide your own EpochFunc +// function. +// When this option is nil, time.Now is used. +func WithEpochFunc(epochf EpochFunc) GenOption { + return func(gen *Gen) { + if epochf == nil { + epochf = time.Now + } + + gen.epochFunc = epochf + } +} + +// WithRandomReader is a GenOption that allows you to provide your own random +// reader. +// When this option is nil, the default rand.Reader is used. +func WithRandomReader(reader io.Reader) GenOption { + return func(gen *Gen) { + if reader == nil { + reader = rand.Reader + } + + gen.rand = reader + } } // NewV1 returns a UUID based on the current timestamp and MAC address. func (g *Gen) NewV1() (UUID, error) { u := UUID{} - timeNow, clockSeq, err := g.getClockSequence() + timeNow, clockSeq, err := g.getClockSequence(false) if err != nil { return Nil, err } @@ -182,8 +277,44 @@ func (g *Gen) NewV5(ns UUID, name string) UUID { return u } -// getClockSequence returns the epoch and clock sequence. -func (g *Gen) getClockSequence() (uint64, uint16, error) { +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. 
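Before the `NewV6` body that follows, a usage sketch may help: the package-level `NewV6`/`NewV7` wrappers added earlier in this hunk delegate to `DefaultGenerator`, so generating the new k-sortable ids looks roughly like this (a sketch, assuming the vendored `github.com/gofrs/uuid` import path):

```go
package main

import (
	"fmt"

	"github.com/gofrs/uuid"
)

func main() {
	// V6: Gregorian-epoch timestamp with bits reordered so lexical order
	// tracks creation order; field-compatible with V1.
	u6, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}

	// V7: millisecond Unix timestamp plus a monotonic random counter, so
	// ids generated within the same millisecond still sort in order.
	u7, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}

	fmt.Println(u6.Version(), u6) // 6 <canonical UUID string>
	fmt.Println(u7.Version(), u7) // 7 <canonical UUID string>
}
```

Note that each option constructor above (`WithHWAddrFunc`, `WithEpochFunc`, `WithRandomReader`) falls back to the package default when given nil, so a zero-argument `NewGenWithOptions()` behaves exactly like `NewGen()`.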
+func (g *Gen) NewV6() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + timeNow, clockSeq, err := g.getClockSequence(false) + if err != nil { + return Nil, err + } + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid + binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits) + binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits) + + u.SetVersion(V6) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// getClockSequence returns the epoch and clock sequence for V1,V6 and V7 UUIDs. +// +// When useUnixTSMs is false, it uses the Coordinated Universal Time (UTC) as a count of 100- +// +// nanosecond intervals since 00:00:00.00, 15 October 1582 (the date of Gregorian reform to the Christian calendar). +func (g *Gen) getClockSequence(useUnixTSMs bool) (uint64, uint16, error) { var err error g.clockSequenceOnce.Do(func() { buf := make([]byte, 2) @@ -199,7 +330,12 @@ func (g *Gen) getClockSequence() (uint64, uint16, error) { g.storageMutex.Lock() defer g.storageMutex.Unlock() - timeNow := g.getEpoch() + var timeNow uint64 + if useUnixTSMs { + timeNow = uint64(g.epochFunc().UnixMilli()) + } else { + timeNow = g.getEpoch() + } // Clock didn't change since last UUID generation. // Should increase clock sequence. if timeNow <= g.lastTime { @@ -210,6 +346,59 @@ func (g *Gen) getClockSequence() (uint64, uint16, error) { return timeNow, g.clockSequence, nil } +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. +// +// This is implemented based on revision 04 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func (g *Gen) NewV7() (UUID, error) { + var u UUID + /* https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#name-uuid-version-7 + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ + + ms, clockSeq, err := g.getClockSequence(true) + if err != nil { + return Nil, err + } + //UUIDv7 features a 48 bit timestamp. First 32bit (4bytes) represents seconds since 1970, followed by 2 bytes for the ms granularity. + u[0] = byte(ms >> 40) //1-6 bytes: big-endian unsigned number of Unix epoch timestamp + u[1] = byte(ms >> 32) + u[2] = byte(ms >> 24) + u[3] = byte(ms >> 16) + u[4] = byte(ms >> 8) + u[5] = byte(ms) + + //support batching by using a monotonic pseudo-random sequence + //The 6th byte contains the version and partially rand_a data. 
+ //We will lose the most significant bites from the clockSeq (with SetVersion), but it is ok, we need the least significant that contains the counter to ensure the monotonic property + binary.BigEndian.PutUint16(u[6:8], clockSeq) // set rand_a with clock seq which is random and monotonic + + //override first 4bits of u[6]. + u.SetVersion(V7) + + //set rand_b 64bits of pseudo-random bits (first 2 will be overridden) + if _, err = io.ReadFull(g.rand, u[8:16]); err != nil { + return Nil, err + } + //override first 2 bits of byte[8] for the variant + u.SetVariant(VariantRFC4122) + + return u, nil +} + // Returns the hardware address. func (g *Gen) getHardwareAddr() ([]byte, error) { var err error @@ -250,9 +439,11 @@ func newFromHash(h hash.Hash, ns UUID, name string) UUID { return u } +var netInterfaces = net.Interfaces + // Returns the hardware address. func defaultHWAddrFunc() (net.HardwareAddr, error) { - ifaces, err := net.Interfaces() + ifaces, err := netInterfaces() if err != nil { return []byte{}, err } diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/sql.go b/cluster-autoscaler/vendor/github.com/gofrs/uuid/sql.go index 6f254a4fd107..01d5d88496ce 100644 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/sql.go +++ b/cluster-autoscaler/vendor/github.com/gofrs/uuid/sql.go @@ -22,12 +22,14 @@ package uuid import ( - "bytes" + "database/sql" "database/sql/driver" - "encoding/json" "fmt" ) +var _ driver.Valuer = UUID{} +var _ sql.Scanner = (*UUID)(nil) + // Value implements the driver.Valuer interface. func (u UUID) Value() (driver.Value, error) { return u.String(), nil @@ -49,7 +51,9 @@ func (u *UUID) Scan(src interface{}) error { return u.UnmarshalText(src) case string: - return u.UnmarshalText([]byte(src)) + uu, err := FromString(src) + *u = uu + return err } return fmt.Errorf("uuid: cannot convert %T to UUID", src) @@ -83,27 +87,30 @@ func (u *NullUUID) Scan(src interface{}) error { return u.UUID.Scan(src) } +var nullJSON = []byte("null") + // MarshalJSON marshals the NullUUID as null or the nested UUID func (u NullUUID) MarshalJSON() ([]byte, error) { if !u.Valid { - return json.Marshal(nil) + return nullJSON, nil } - - return json.Marshal(u.UUID) + var buf [38]byte + buf[0] = '"' + encodeCanonical(buf[1:37], u.UUID) + buf[37] = '"' + return buf[:], nil } // UnmarshalJSON unmarshals a NullUUID func (u *NullUUID) UnmarshalJSON(b []byte) error { - if bytes.Equal(b, []byte("null")) { + if string(b) == "null" { u.UUID, u.Valid = Nil, false return nil } - - if err := json.Unmarshal(b, &u.UUID); err != nil { - return err + if n := len(b); n >= 2 && b[0] == '"' { + b = b[1 : n-1] } - - u.Valid = true - - return nil + err := u.UUID.UnmarshalText(b) + u.Valid = (err == nil) + return err } diff --git a/cluster-autoscaler/vendor/github.com/gofrs/uuid/uuid.go b/cluster-autoscaler/vendor/github.com/gofrs/uuid/uuid.go index 78aed6e2539b..5320fb53894b 100644 --- a/cluster-autoscaler/vendor/github.com/gofrs/uuid/uuid.go +++ b/cluster-autoscaler/vendor/github.com/gofrs/uuid/uuid.go @@ -20,11 +20,13 @@ // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // Package uuid provides implementations of the Universally Unique Identifier -// (UUID), as specified in RFC-4122, +// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03). // -// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. 
The +// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable +// UUIDs, versions 6 and 7. // -// DCE 1.1[2] provides the specification for version 2, but version 2 support +// DCE 1.1[3] provides the specification for version 2, but version 2 support // was removed from this package in v4 due to some concerns with the // specification itself. Reading the spec, it seems that it would result in // generating UUIDs that aren't very unique. In having read the spec it seemed @@ -34,15 +36,14 @@ // ensure we were understanding the specification correctly. // // [1] https://tools.ietf.org/html/rfc4122 -// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03 +// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 package uuid import ( "encoding/binary" "encoding/hex" "fmt" - "io" - "strings" "time" ) @@ -60,6 +61,9 @@ const ( V3 // Version 3 (namespace name-based) V4 // Version 4 (random) V5 // Version 5 (namespace name-based) + V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft] + V7 // Version 7 (k-sortable timestamp and random data) [peabody draft] + _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented] ) // UUID layout variants. @@ -88,6 +92,7 @@ const _100nsPerSecond = 10000000 func (t Timestamp) Time() (time.Time, error) { secs := uint64(t) / _100nsPerSecond nsecs := 100 * (uint64(t) % _100nsPerSecond) + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil } @@ -98,17 +103,33 @@ func TimestampFromV1(u UUID) (Timestamp, error) { err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) return 0, err } + low := binary.BigEndian.Uint32(u[0:4]) mid := binary.BigEndian.Uint16(u[4:6]) hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil } -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) +// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This +// function returns an error if the UUID is any version other than 6. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func TimestampFromV6(u UUID) (Timestamp, error) { + if u.Version() != 6 { + return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version()) + } + + hi := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + low := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil +} // Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to // zero. @@ -122,6 +143,11 @@ var ( NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) ) +// IsNil returns if the UUID is equal to the nil UUID +func (u UUID) IsNil() bool { + return u == Nil +} + // Version returns the algorithm version used to generate the UUID. 
func (u UUID) Version() byte { return u[6] >> 4 @@ -148,22 +174,33 @@ func (u UUID) Bytes() []byte { return u[:] } +// encodeCanonical encodes the canonical RFC-4122 form of UUID u into the +// first 36 bytes dst. +func encodeCanonical(dst []byte, u UUID) { + const hextable = "0123456789abcdef" + dst[8] = '-' + dst[13] = '-' + dst[18] = '-' + dst[23] = '-' + for i, x := range [16]byte{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + c := u[i] + dst[x] = hextable[c>>4] + dst[x+1] = hextable[c&0x0f] + } +} + // String returns a canonical RFC-4122 string representation of the UUID: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) + var buf [36]byte + encodeCanonical(buf[:], u) + return string(buf[:]) } // Format implements fmt.Formatter for UUID values. @@ -176,52 +213,41 @@ func (u UUID) String() string { // All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return // "%!verb(uuid.UUID=value)" as recommended by the fmt package. func (u UUID) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('#') { + fmt.Fprintf(f, "%#v", [Size]byte(u)) + return + } switch c { case 'x', 'X': - s := hex.EncodeToString(u.Bytes()) + b := make([]byte, 32) + hex.Encode(b, u[:]) if c == 'X' { - s = strings.Map(toCapitalHexDigits, s) - } - _, _ = io.WriteString(f, s) - case 'v': - var s string - if f.Flag('#') { - s = fmt.Sprintf("%#v", [Size]byte(u)) - } else { - s = u.String() + toUpperHex(b) } - _, _ = io.WriteString(f, s) - case 's', 'S': - s := u.String() + _, _ = f.Write(b) + case 'v', 's', 'S': + b, _ := u.MarshalText() if c == 'S' { - s = strings.Map(toCapitalHexDigits, s) + toUpperHex(b) } - _, _ = io.WriteString(f, s) + _, _ = f.Write(b) case 'q': - _, _ = io.WriteString(f, `"`+u.String()+`"`) + b := make([]byte, 38) + b[0] = '"' + encodeCanonical(b[1:], u) + b[37] = '"' + _, _ = f.Write(b) default: // invalid/unsupported format verb fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) } } -func toCapitalHexDigits(ch rune) rune { - // convert a-f hex digits to A-F - switch ch { - case 'a': - return 'A' - case 'b': - return 'B' - case 'c': - return 'C' - case 'd': - return 'D' - case 'e': - return 'E' - case 'f': - return 'F' - default: - return ch +func toUpperHex(b []byte) { + for i, c := range b { + if 'a' <= c && c <= 'f' { + b[i] = c - ('a' - 'A') + } } } @@ -249,7 +275,8 @@ func (u *UUID) SetVariant(v byte) { // Must is a helper that wraps a call to a function returning (UUID, error) // and panics if the error is non-nil. 
It is intended for use in variable // initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +// +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) func Must(u UUID, err error) UUID { if err != nil { panic(err) diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md index f5d551ca8fd8..30f2f2a6f70c 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -54,9 +54,9 @@ import "github.com/golang-jwt/jwt/v4" See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: -* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) -* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) -* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples) ## Extensions @@ -96,7 +96,7 @@ A token is simply a JSON object that is signed by its author. this tells you exa * The author of the token was in the possession of the signing secret * The data has not been modified since it was signed -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims at a (very) experimental implementation of the JWE standard. ### Choosing a Signing Method @@ -110,10 +110,10 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri Each signing method expects a different object type for its signing keys. See the package documentation for details. 
Here are the most common ones: -* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation -* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation ### JWT and OAuth @@ -131,7 +131,7 @@ This library uses descriptive error messages whenever possible. If you are not g ## More -Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go index 9d95cad2bf27..364cec8773cd 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -265,9 +265,5 @@ func verifyIss(iss string, cmp string, required bool) bool { if iss == "" { return !required } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 } diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser.go index 2f61a69d7fcb..c0a6f6927917 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -42,6 +42,13 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. 
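To make the key-type expectations concrete, here is a hedged end-to-end HMAC example against the golang-jwt/jwt/v4 API; the secret and claim values are placeholders.

    package main

    import (
        "fmt"
        "time"

        "github.com/golang-jwt/jwt/v4"
    )

    func main() {
        key := []byte("placeholder-secret") // HMAC methods expect []byte keys

        tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{
            Subject:   "cluster-autoscaler",
            ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
        })
        signed, err := tok.SignedString(key)
        if err != nil {
            panic(err)
        }

        parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
            // Reject unexpected algorithms before handing back the key.
            if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
                return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
            }
            return key, nil
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(parsed.Claims.(jwt.MapClaims)["sub"]) // cluster-autoscaler
    }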
This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go index 3cb0f3f0e4c0..786b275ce03a 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -14,6 +14,12 @@ import ( // To use the non-recommended decoding, set this boolean to `true` prior to using this package. var DecodePaddingAllowed bool +// DecodeStrict will switch the codec used for decoding JWTs into strict mode. +// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5. +// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. +// To use strict decoding, set this boolean to `true` prior to using this package. +var DecodeStrict bool + // TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). // You can override it to use another time value. This is useful for testing or if your // server uses a different time zone than your tokens. @@ -99,6 +105,11 @@ func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token return NewParser(options...).Parse(tokenString, keyFunc) } +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. 
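The embedding caveat in the note above is easiest to see in code. A hedged sketch with a hypothetical claims type, embedding RegisteredClaims by value so the parser has allocated memory to decode into:

    package main

    import (
        "fmt"

        "github.com/golang-jwt/jwt/v4"
    )

    // CustomClaims is hypothetical; it embeds a non-pointer RegisteredClaims,
    // which avoids the nil-pointer panic described in the note above.
    type CustomClaims struct {
        Team string `json:"team"`
        jwt.RegisteredClaims
    }

    func main() {
        key := []byte("placeholder-secret")
        signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256,
            CustomClaims{Team: "autoscaling"}).SignedString(key)
        if err != nil {
            panic(err)
        }

        claims := &CustomClaims{}
        tok, err := jwt.ParseWithClaims(signed, claims, func(t *jwt.Token) (interface{}, error) {
            return key, nil
        })
        if err != nil || !tok.Valid {
            panic(err)
        }
        fmt.Println(claims.Team) // autoscaling
    }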
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) } @@ -116,12 +127,17 @@ func EncodeSegment(seg []byte) string { // Deprecated: In a future release, we will demote this function to a non-exported function, since it // should only be used internally func DecodeSegment(seg string) ([]byte, error) { + encoding := base64.RawURLEncoding + if DecodePaddingAllowed { if l := len(seg) % 4; l > 0 { seg += strings.Repeat("=", 4-l) } - return base64.URLEncoding.DecodeString(seg) + encoding = base64.URLEncoding } - return base64.RawURLEncoding.DecodeString(seg) + if DecodeStrict { + encoding = encoding.Strict() + } + return encoding.DecodeString(seg) } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel index ddddbd2804e2..4331321139ec 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -32,7 +32,7 @@ go_library( "//interpreter:go_default_library", "//interpreter/functions:go_default_library", "//parser:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protodesc:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", @@ -70,7 +70,7 @@ go_test( "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", ], diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go index 8cf442ee714b..d9c2ef63f27a 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/env.go @@ -109,10 +109,11 @@ type Env struct { prsrOpts []parser.Option // Internal checker representation - chk *checker.Env - chkErr error - chkOnce sync.Once - chkOpts []checker.Option + chkMutex sync.Mutex + chk *checker.Env + chkErr error + chkOnce sync.Once + chkOpts []checker.Option // Program options tied to the environment progOpts []ProgramOption @@ -178,14 +179,14 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { pe, _ := AstToParsedExpr(ast) // Construct the internal checker env, erroring if there is an issue adding the declarations. 
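The DecodeSegment change above composes two independent switches: padded versus raw alphabets, and strict versus permissive trailing bits. A standalone rendition of the same selection logic, using only the standard library:

    package main

    import (
        "encoding/base64"
        "fmt"
        "strings"
    )

    // decodeSegment mirrors the logic above: padding support selects the
    // padded codec, and strict mode rejects non-zero trailing padding bits
    // per RFC 4648 section 3.5.
    func decodeSegment(seg string, allowPadding, strict bool) ([]byte, error) {
        encoding := base64.RawURLEncoding
        if allowPadding {
            if l := len(seg) % 4; l > 0 {
                seg += strings.Repeat("=", 4-l)
            }
            encoding = base64.URLEncoding
        }
        if strict {
            encoding = encoding.Strict()
        }
        return encoding.DecodeString(seg)
    }

    func main() {
        // Unpadded JOSE header segment for {"alg":"HS256","typ":"JWT"}.
        b, err := decodeSegment("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9", false, true)
        fmt.Println(string(b), err)
    }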
- err := e.initChecker() + chk, err := e.initChecker() if err != nil { errs := common.NewErrors(ast.Source()) - errs.ReportError(common.NoLocation, e.chkErr.Error()) + errs.ReportError(common.NoLocation, err.Error()) return nil, NewIssues(errs) } - res, errs := checker.Check(pe, ast.Source(), e.chk) + res, errs := checker.Check(pe, ast.Source(), chk) if len(errs.GetErrors()) > 0 { return nil, NewIssues(errs) } @@ -239,8 +240,9 @@ func (e *Env) CompileSource(src Source) (*Ast, *Issues) { // TypeProvider are immutable, or that their underlying implementations are based on the // ref.TypeRegistry which provides a Copy method which will be invoked by this method. func (e *Env) Extend(opts ...EnvOption) (*Env, error) { - if e.chkErr != nil { - return nil, e.chkErr + chk, chkErr := e.getCheckerOrError() + if chkErr != nil { + return nil, chkErr } prsrOptsCopy := make([]parser.Option, len(e.prsrOpts)) @@ -254,10 +256,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { // Copy the declarations if needed. decsCopy := []*exprpb.Decl{} - if e.chk != nil { + if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. - chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(e.chk)) + chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk)) } else { // If the type-checker has not been instantiated, ensure the unvalidated declarations are // provided to the extended Env instance. @@ -460,12 +462,12 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { // EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and // extension functions provided by estimator. -func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator) (checker.CostEstimate, error) { +func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) { checked, err := AstToCheckedExpr(ast) if err != nil { return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err) } - return checker.Cost(checked, estimator), nil + return checker.Cost(checked, estimator, opts...) } // configure applies a series of EnvOptions to the current environment. @@ -509,7 +511,7 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { // Ensure that the checker init happens eagerly rather than lazily. if e.HasFeature(featureEagerlyValidateDeclarations) { - err := e.initChecker() + _, err := e.initChecker() if err != nil { return nil, err } @@ -518,7 +520,7 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { return e, nil } -func (e *Env) initChecker() error { +func (e *Env) initChecker() (*checker.Env, error) { e.chkOnce.Do(func() { chkOpts := []checker.Option{} chkOpts = append(chkOpts, e.chkOpts...) @@ -530,32 +532,47 @@ func (e *Env) initChecker() error { ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...) if err != nil { - e.chkErr = err + e.setCheckerOrError(nil, err) return } // Add the statically configured declarations. err = ce.Add(e.declarations...) if err != nil { - e.chkErr = err + e.setCheckerOrError(nil, err) return } // Add the function declarations which are derived from the FunctionDecl instances. 
for _, fn := range e.functions { fnDecl, err := functionDeclToExprDecl(fn) if err != nil { - e.chkErr = err + e.setCheckerOrError(nil, err) return } err = ce.Add(fnDecl) if err != nil { - e.chkErr = err + e.setCheckerOrError(nil, err) return } } // Add function declarations here separately. - e.chk = ce + e.setCheckerOrError(ce, nil) }) - return e.chkErr + return e.getCheckerOrError() +} + +// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner +func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) { + e.chkMutex.Lock() + e.chk = chk + e.chkErr = chkErr + e.chkMutex.Unlock() +} + +// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner +func (e *Env) getCheckerOrError() (*checker.Env, error) { + e.chkMutex.Lock() + defer e.chkMutex.Unlock() + return e.chk, e.chkErr } // maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go index 072cec30e6ff..bcfd44f78a9f 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/library.go @@ -20,6 +20,7 @@ import ( "time" "github.com/google/cel-go/checker" + "github.com/google/cel-go/common" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" @@ -28,6 +29,18 @@ import ( "github.com/google/cel-go/interpreter" "github.com/google/cel-go/interpreter/functions" "github.com/google/cel-go/parser" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +const ( + optMapMacro = "optMap" + hasValueFunc = "hasValue" + optionalNoneFunc = "optional.none" + optionalOfFunc = "optional.of" + optionalOfNonZeroValueFunc = "optional.ofNonZeroValue" + valueFunc = "value" + unusedIterVar = "#unused" ) // Library provides a collection of EnvOption and ProgramOption values used to configure a CEL @@ -130,13 +143,16 @@ func (optionalLibrary) CompileOptions() []EnvOption { // Introduce the optional type. Types(types.OptionalType), + // Configure the optMap macro. + Macros(NewReceiverMacro(optMapMacro, 2, optMap)), + // Global and member functions for working with optional values. 
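Returning to the initChecker refactor earlier in this hunk: the combination of sync.Once for one-shot initialization and a mutex guarding the (result, error) pair afterwards is a reusable pattern. A minimal sketch with a stand-in checkerEnv type rather than the real checker.Env:

    package main

    import (
        "fmt"
        "sync"
    )

    type checkerEnv struct{ name string }

    // lazyChecker mirrors the Env fields above: once guards the one-time
    // build, while mu makes the (chk, err) pair safe to read from goroutines
    // that call Extend or Check concurrently.
    type lazyChecker struct {
        once sync.Once
        mu   sync.Mutex
        chk  *checkerEnv
        err  error
    }

    func (l *lazyChecker) init() (*checkerEnv, error) {
        l.once.Do(func() {
            l.set(&checkerEnv{name: "checker"}, nil)
        })
        return l.get()
    }

    func (l *lazyChecker) set(c *checkerEnv, err error) {
        l.mu.Lock()
        l.chk, l.err = c, err
        l.mu.Unlock()
    }

    func (l *lazyChecker) get() (*checkerEnv, error) {
        l.mu.Lock()
        defer l.mu.Unlock()
        return l.chk, l.err
    }

    func main() {
        var l lazyChecker
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                chk, err := l.init()
                fmt.Println(chk.name, err) // checker <nil>, from every goroutine
            }()
        }
        wg.Wait()
    }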
- Function("optional.of", + Function(optionalOfFunc, Overload("optional_of", []*Type{paramTypeV}, optionalTypeV, UnaryBinding(func(value ref.Val) ref.Val { return types.OptionalOf(value) }))), - Function("optional.ofNonZeroValue", + Function(optionalOfNonZeroValueFunc, Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV, UnaryBinding(func(value ref.Val) ref.Val { v, isZeroer := value.(traits.Zeroer) @@ -145,18 +161,18 @@ func (optionalLibrary) CompileOptions() []EnvOption { } return types.OptionalNone }))), - Function("optional.none", + Function(optionalNoneFunc, Overload("optional_none", []*Type{}, optionalTypeV, FunctionBinding(func(values ...ref.Val) ref.Val { return types.OptionalNone }))), - Function("value", + Function(valueFunc, MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV, UnaryBinding(func(value ref.Val) ref.Val { opt := value.(*types.Optional) return opt.GetValue() }))), - Function("hasValue", + Function(hasValueFunc, MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType, UnaryBinding(func(value ref.Val) ref.Val { opt := value.(*types.Optional) @@ -190,6 +206,37 @@ func (optionalLibrary) CompileOptions() []EnvOption { } } +func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { + varIdent := args[0] + varName := "" + switch varIdent.GetExprKind().(type) { + case *exprpb.Expr_IdentExpr: + varName = varIdent.GetIdentExpr().GetName() + default: + return nil, &common.Error{ + Message: "optMap() variable name must be a simple identifier", + Location: meh.OffsetLocation(varIdent.GetId()), + } + } + mapExpr := args[1] + return meh.GlobalCall( + operators.Conditional, + meh.ReceiverCall(hasValueFunc, target), + meh.GlobalCall(optionalOfFunc, + meh.Fold( + unusedIterVar, + meh.NewList(), + varName, + meh.ReceiverCall(valueFunc, target), + meh.LiteralBool(false), + meh.Ident(varName), + mapExpr, + ), + ), + meh.GlobalCall(optionalNoneFunc), + ), nil +} + // ProgramOptions implements the Library interface method. func (optionalLibrary) ProgramOptions() []ProgramOption { return []ProgramOption{ diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go index 63321b548170..07f3d6c71611 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/cel/options.go @@ -568,3 +568,12 @@ func ParserRecursionLimit(limit int) EnvOption { return e, nil } } + +// ParserExpressionSizeLimit adjusts the number of code points the expression parser is allowed to parse. +// Defaults defined in the parser package. 
+func ParserExpressionSizeLimit(limit int) EnvOption { + return func(e *Env) (*Env, error) { + e.prsrOpts = append(e.prsrOpts, parser.ExpressionSizeCodePointLimit(limit)) + return e, nil + } +} diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/BUILD.bazel index bec40b6e695b..1c6ddb7f7da6 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -30,7 +30,7 @@ go_library( "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//parser:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/emptypb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", @@ -54,7 +54,7 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library", + "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go index 6cf8c4fea095..8ae8d18bfce7 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/cost.go @@ -261,6 +261,8 @@ type coster struct { computedSizes map[int64]SizeEstimate checkedExpr *exprpb.CheckedExpr estimator CostEstimator + // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. + presenceTestCost CostEstimate } // Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names. @@ -283,16 +285,39 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) { return 0, false } +// CostOption configures flags which affect cost computations. +type CostOption func(*coster) error + +// PresenceTestHasCost determines whether presence testing has a cost of one or zero. +// Defaults to presence test has a cost of one. +func PresenceTestHasCost(hasCost bool) CostOption { + return func(c *coster) error { + if hasCost { + c.presenceTestCost = selectAndIdentCost + return nil + } + c.presenceTestCost = CostEstimate{Min: 0, Max: 0} + return nil + } +} + // Cost estimates the cost of the parsed and type checked CEL expression. 
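Putting PresenceTestHasCost and the variadic Cost signature below together, here is a caller-side sketch via Env.EstimateCost; the no-op estimator is purely illustrative, and the CostEstimator method signatures are assumed from this release of the checker package.

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/checker"
    )

    // nilEstimator supplies no size or call hints, so the coster falls back
    // to its built-in defaults; it exists only to satisfy the interface.
    type nilEstimator struct{}

    func (nilEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
        return nil
    }

    func (nilEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
        return nil
    }

    func main() {
        env, err := cel.NewEnv()
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`has({'a': 1}.a)`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        // With PresenceTestHasCost(false), the has() test no longer counts
        // toward the estimate, preserving pre-change cost numbers.
        est, err := env.EstimateCost(ast, nilEstimator{}, checker.PresenceTestHasCost(false))
        fmt.Println(est.Min, est.Max, err)
    }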
-func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator) CostEstimate { - c := coster{ - checkedExpr: checker, - estimator: estimator, - exprPath: map[int64][]string{}, - iterRanges: map[string][]int64{}, - computedSizes: map[int64]SizeEstimate{}, +func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { + c := &coster{ + checkedExpr: checker, + estimator: estimator, + exprPath: map[int64][]string{}, + iterRanges: map[string][]int64{}, + computedSizes: map[int64]SizeEstimate{}, + presenceTestCost: CostEstimate{Min: 1, Max: 1}, + } + for _, opt := range opts { + err := opt(c) + if err != nil { + return CostEstimate{}, err + } } - return c.cost(checker.GetExpr()) + return c.cost(checker.GetExpr()), nil } func (c *coster) cost(e *exprpb.Expr) CostEstimate { @@ -347,6 +372,7 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate { // this is equivalent to how evalTestOnly increments the runtime cost counter // but does not add any additional cost for the qualifier, except here we do // the reverse (ident adds cost) + sum = sum.Add(c.presenceTestCost) sum = sum.Add(c.cost(sel.GetOperand())) return sum } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel index 5a24f1da8092..9384be4507c2 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel @@ -13,7 +13,7 @@ go_library( ], importpath = "github.com/google/cel-go/checker/decls", deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//types/known/emptypb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", ], diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/env.go b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/env.go index c7eeb04eee6e..be89d2d68d76 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/checker/env.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/checker/env.go @@ -226,7 +226,7 @@ func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg { newOverloads := []*exprpb.Decl_FunctionDecl_Overload{} for _, overload := range overloads { existing, found := existingOverloads[overload.GetOverloadId()] - if !found || !proto.Equal(existing, overload) { + if !found || !overloadsEqual(existing, overload) { newOverloads = append(newOverloads, overload) } } @@ -264,6 +264,31 @@ func (e *Env) isOverloadDisabled(overloadID string) bool { return found } +// overloadsEqual returns whether two overloads have identical signatures. 
+// +// type parameter names are ignored as they may be specified in any order and have no bearing on overload +// equivalence +func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool { + return o1.GetOverloadId() == o2.GetOverloadId() && + o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() && + paramsEqual(o1.GetParams(), o2.GetParams()) && + proto.Equal(o1.GetResultType(), o2.GetResultType()) +} + +// paramsEqual returns whether two lists have equal length and all types are equal +func paramsEqual(p1, p2 []*exprpb.Type) bool { + if len(p1) != len(p2) { + return false + } + for i, a := range p1 { + b := p2[i] + if !proto.Equal(a, b) { + return false + } + } + return true +} + // sanitizeFunction replaces well-known types referenced by message name with their equivalent // CEL built-in type instances. func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl { diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/BUILD.bazel index a0058aebe07f..d6165b13af05 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -17,7 +17,7 @@ go_library( importpath = "github.com/google/cel-go/common", deps = [ "//common/runes:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_x_text//width:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/containers/BUILD.bazel index 18142d94ef6c..3f3f07887124 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/containers/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/containers/BUILD.bazel @@ -12,7 +12,7 @@ go_library( ], importpath = "github.com/google/cel-go/common/containers", deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ], ) @@ -26,6 +26,6 @@ go_test( ":go_default_library", ], deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/BUILD.bazel index cf5c5d246760..1f029839c713 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/debug/BUILD.bazel @@ -13,6 +13,6 @@ go_library( importpath = "github.com/google/cel-go/common/debug", deps = [ "//common:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel index f56700de5d62..89c4feacbfc7 100644 --- 
a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -39,8 +39,8 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", - "@org_golang_google_genproto//googleapis/rpc/status:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", @@ -80,7 +80,7 @@ go_test( "//common/types/ref:go_default_library", "//test:go_default_library", "//test/proto3pb:test_all_types_go_proto", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//types/known/anypb:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel index f23ac9c0e252..e2b9d37b569e 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel @@ -17,7 +17,7 @@ go_library( ], importpath = "github.com/google/cel-go/common/types/pb", deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protowire:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel index 1d0f468993ba..79330c332163 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel @@ -13,7 +13,7 @@ go_library( ], importpath = "github.com/google/cel-go/common/types/ref", deps = [ - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", ], diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go index 5921ffd81f36..e0d58145cdcd 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/common/types/ref/reference.go @@ -37,9 +37,18 @@ type Type interface { type 
Val interface { // ConvertToNative converts the Value to a native Go struct according to the // reflected type description, or error if the conversion is not feasible. + // + // The ConvertToNative method is intended to be used to support conversion between CEL types + // and native types during object creation expressions or by clients who need to adapt the, + // returned CEL value into an equivalent Go value instance. + // + // When implementing or using ConvertToNative, the following guidelines apply: + // - Use ConvertToNative when marshalling CEL evaluation results to native types. + // - Do not use ConvertToNative within CEL extension functions. + // - Document whether your implementation supports non-CEL field types, such as Go or Protobuf. ConvertToNative(typeDesc reflect.Type) (any, error) - // ConvertToType supports type conversions between value types supported by the expression language. + // ConvertToType supports type conversions between CEL value types supported by the expression language. ConvertToType(typeValue Type) Val // Equal returns true if the `other` value has the same type and content as the implementing struct. diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel index 2f1003aba88a..4bcf8a283eaa 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "math.go", "native.go", "protos.go", + "sets.go", "strings.go", ], importpath = "github.com/google/cel-go/ext", @@ -26,7 +27,7 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", "@org_golang_google_protobuf//types/known/structpb", @@ -43,6 +44,7 @@ go_test( "math_test.go", "native_test.go", "protos_test.go", + "sets_test.go", "strings_test.go", ], embed = [ @@ -58,7 +60,7 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md index c4faf59ab1d5..ef0eb2ab7f53 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/README.md @@ -3,6 +3,30 @@ CEL extensions are a related set of constants, functions, macros, or other features which may not be covered by the core CEL spec. +## Bindings + +Returns a cel.EnvOption to configure support for local variable bindings +in expressions. + +# Cel.Bind + +Binds a simple identifier to an initialization expression which may be used +in a subsequenct result expression. 
Bindings may also be nested within each +other. + + cel.bind(, , ) + +Examples: + + cel.bind(a, 'hello', + cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello" + + // Avoid a list allocation within the exists comprehension. + cel.bind(valid_values, [a, b, c], + [d, e, f].exists(elem, elem in valid_values)) + +Local bindings are not guaranteed to be evaluated before use. + ## Encoders Encoding utilies for marshalling data into standardized representations. @@ -33,8 +57,7 @@ Example: ## Math -Math returns a cel.EnvOption to configure namespaced math helper macros and -functions. +Math helper macros and functions. Note, all macros use the 'math' namespace; however, at the time of macro expansion the namespace looks just like any other identifier. If you are @@ -96,8 +119,7 @@ Examples: ## Protos -Protos returns a cel.EnvOption to configure extended macros and functions for -proto manipulation. +Protos configure extended macros and functions for proto manipulation. Note, all macros use the 'proto' namespace; however, at the time of macro expansion the namespace looks just like any other identifier. If you are @@ -127,6 +149,62 @@ Example: proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false +## Sets + +Sets provides set relationship tests. + +There is no set type within CEL, and while one may be introduced in the +future, there are cases where a `list` type is known to behave like a set. +For such cases, this library provides some basic functionality for +determining set containment, equivalence, and intersection. + +### Sets.Contains + +Returns whether the first list argument contains all elements in the second +list argument. The list may contain elements of any type and standard CEL +equality is used to determine whether a value exists in both lists. If the +second list is empty, the result will always return true. + + sets.contains(list(T), list(T)) -> bool + +Examples: + + sets.contains([], []) // true + sets.contains([], [1]) // false + sets.contains([1, 2, 3, 4], [2, 3]) // true + sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true + +### Sets.Equivalent + +Returns whether the first and second list are set equivalent. Lists are set +equivalent if for every item in the first list, there is an element in the +second which is equal. The lists may not be of the same size as they do not +guarantee the elements within them are unique, so size does not factor into +the computation. + + sets.equivalent(list(T), list(T)) -> bool + +Examples: + + sets.equivalent([], []) // true + sets.equivalent([1], [1, 1]) // true + sets.equivalent([1], [1u, 1.0]) // true + sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true + +### Sets.Intersects + +Returns whether the first list has at least one element whose value is equal +to an element in the second list. If either list is empty, the result will +be false. + + sets.intersects(list(T), list(T)) -> bool + +Examples: + + sets.intersects([1], []) // false + sets.intersects([1], [1, 2]) // true + sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true + ## Strings Extended functions for string manipulation. 
As a general note, all indices are diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/bindings.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/bindings.go new file mode 100644 index 000000000000..9cc3c3efe585 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/bindings.go @@ -0,0 +1,100 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// Bindings returns a cel.EnvOption to configure support for local variable +// bindings in expressions. +// +// # Cel.Bind +// +// Binds a simple identifier to an initialization expression which may be used +// in a subsequent result expression. Bindings may also be nested within each +// other. +// +// cel.bind(<varName>, <initExpr>, <resultExpr>) +// +// Examples: +// +// cel.bind(a, 'hello', +// cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello" +// +// // Avoid a list allocation within the exists comprehension. +// cel.bind(valid_values, [a, b, c], +// [d, e, f].exists(elem, elem in valid_values)) +// +// Local bindings are not guaranteed to be evaluated before use.
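A matching usage sketch for the Bindings option documented above, evaluating the nested example from the doc comment:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Bindings())
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(
            `cel.bind(a, 'hello', cel.bind(b, 'world', a + b + b + a))`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(cel.NoVars())
        fmt.Println(out, err) // helloworldworldhello <nil>
    }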
+func Bindings() cel.EnvOption { + return cel.Lib(celBindings{}) +} + +const ( + celNamespace = "cel" + bindMacro = "bind" + unusedIterVar = "#unused" +) + +type celBindings struct{} + +func (celBindings) LibraryName() string { + return "cel.lib.ext.cel.bindings" +} + +func (celBindings) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Macros( + // cel.bind(var, , ) + cel.NewReceiverMacro(bindMacro, 3, celBind), + ), + } +} + +func (celBindings) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { + if !macroTargetMatchesNamespace(celNamespace, target) { + return nil, nil + } + varIdent := args[0] + varName := "" + switch varIdent.GetExprKind().(type) { + case *exprpb.Expr_IdentExpr: + varName = varIdent.GetIdentExpr().GetName() + default: + return nil, &common.Error{ + Message: "cel.bind() variable names must be simple identifers", + Location: meh.OffsetLocation(varIdent.GetId()), + } + } + varInit := args[1] + resultExpr := args[2] + return meh.Fold( + unusedIterVar, + meh.NewList(), + varName, + varInit, + meh.LiteralBool(false), + meh.Ident(varName), + resultExpr, + ), nil +} diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go index 79b0c8bf948d..1c8ad585a176 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/math.go @@ -91,7 +91,7 @@ func Math() cel.EnvOption { return cel.Lib(mathLib{}) } -var ( +const ( mathNamespace = "math" leastMacro = "least" greatestMacro = "greatest" diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/sets.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/sets.go new file mode 100644 index 000000000000..4820d6199e66 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/sets.go @@ -0,0 +1,138 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// Sets returns a cel.EnvOption to configure namespaced set relationship +// functions. +// +// There is no set type within CEL, and while one may be introduced in the +// future, there are cases where a `list` type is known to behave like a set. +// For such cases, this library provides some basic functionality for +// determining set containment, equivalence, and intersection. +// +// # Sets.Contains +// +// Returns whether the first list argument contains all elements in the second +// list argument. The list may contain elements of any type and standard CEL +// equality is used to determine whether a value exists in both lists. 
If the +// second list is empty, the result will always return true. +// +// sets.contains(list(T), list(T)) -> bool +// +// Examples: +// +// sets.contains([], []) // true +// sets.contains([], [1]) // false +// sets.contains([1, 2, 3, 4], [2, 3]) // true +// sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true +// +// # Sets.Equivalent +// +// Returns whether the first and second list are set equivalent. Lists are set +// equivalent if for every item in the first list, there is an element in the +// second which is equal. The lists may not be of the same size as they do not +// guarantee the elements within them are unique, so size does not factor into +// the computation. +// +// Examples: +// +// sets.equivalent([], []) // true +// sets.equivalent([1], [1, 1]) // true +// sets.equivalent([1], [1u, 1.0]) // true +// sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true +// +// # Sets.Intersects +// +// Returns whether the first list has at least one element whose value is equal +// to an element in the second list. If either list is empty, the result will +// be false. +// +// Examples: +// +// sets.intersects([1], []) // false +// sets.intersects([1], [1, 2]) // true +// sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true +func Sets() cel.EnvOption { + return cel.Lib(setsLib{}) +} + +type setsLib struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (setsLib) LibraryName() string { + return "cel.lib.ext.sets" +} + +// CompileOptions implements the Library interface method. +func (setsLib) CompileOptions() []cel.EnvOption { + listType := cel.ListType(cel.TypeParamType("T")) + return []cel.EnvOption{ + cel.Function("sets.contains", + cel.Overload("list_sets_contains_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsContains))), + cel.Function("sets.equivalent", + cel.Overload("list_sets_equivalent_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsEquivalent))), + cel.Function("sets.intersects", + cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsIntersects))), + } +} + +// ProgramOptions implements the Library interface method. +func (setsLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func setsIntersects(listA, listB ref.Val) ref.Val { + lA := listA.(traits.Lister) + lB := listB.(traits.Lister) + it := lA.Iterator() + for it.HasNext() == types.True { + exists := lB.Contains(it.Next()) + if exists == types.True { + return types.True + } + } + return types.False +} + +func setsContains(list, sublist ref.Val) ref.Val { + l := list.(traits.Lister) + sub := sublist.(traits.Lister) + it := sub.Iterator() + for it.HasNext() == types.True { + exists := l.Contains(it.Next()) + if exists != types.True { + return exists + } + } + return types.True +} + +func setsEquivalent(listA, listB ref.Val) ref.Val { + aContainsB := setsContains(listA, listB) + if aContainsB != types.True { + return aContainsB + } + return setsContains(listB, listA) +} diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go index 2962eb663071..8455d5829093 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/ext/strings.go @@ -81,7 +81,7 @@ const ( // `%X` - same as above, but with A-F capitalized. 
// `%o` - substitutes an integer with its equivalent in octal. // -// .format()` -> +// .format() -> // // Examples: // @@ -431,30 +431,12 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption { s := str.(types.String) return stringOrError(upperASCII(string(s))) }))), - cel.Function("join", - cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, - cel.UnaryBinding(func(list ref.Val) ref.Val { - l, err := list.ConvertToNative(stringListType) - if err != nil { - return types.NewErr(err.Error()) - } - return stringOrError(join(l.([]string))) - })), - cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, - cel.BinaryBinding(func(list, delim ref.Val) ref.Val { - l, err := list.ConvertToNative(stringListType) - if err != nil { - return types.NewErr(err.Error()) - } - d := delim.(types.String) - return stringOrError(joinSeparator(l.([]string), string(d))) - }))), } if sl.version >= 1 { opts = append(opts, cel.Function("format", cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType, cel.FunctionBinding(func(args ...ref.Val) ref.Val { - s := args[0].(types.String).Value().(string) + s := string(args[0].(types.String)) formatArgs := args[1].(traits.Lister) return stringOrError(interpreter.ParseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) }))), @@ -465,6 +447,43 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption { })))) } + if sl.version >= 2 { + opts = append(opts, + cel.Function("join", + cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + l := list.(traits.Lister) + return stringOrError(joinValSeparator(l, "")) + })), + cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.BinaryBinding(func(list, delim ref.Val) ref.Val { + l := list.(traits.Lister) + d := delim.(types.String) + return stringOrError(joinValSeparator(l, string(d))) + }))), + ) + } else { + opts = append(opts, + cel.Function("join", + cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + l, err := list.ConvertToNative(stringListType) + if err != nil { + return types.NewErr(err.Error()) + } + return stringOrError(join(l.([]string))) + })), + cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.BinaryBinding(func(list, delim ref.Val) ref.Val { + l, err := list.ConvertToNative(stringListType) + if err != nil { + return types.NewErr(err.Error()) + } + d := delim.(types.String) + return stringOrError(joinSeparator(l.([]string), string(d))) + }))), + ) + } return opts } @@ -623,6 +642,23 @@ func join(strs []string) (string, error) { return strings.Join(strs, ""), nil } +func joinValSeparator(strs traits.Lister, separator string) (string, error) { + sz := strs.Size().(types.Int) + var sb strings.Builder + for i := types.Int(0); i < sz; i++ { + if i != 0 { + sb.WriteString(separator) + } + elem := strs.Get(i) + str, ok := elem.(types.String) + if !ok { + return "", fmt.Errorf("join: invalid input: %v", elem) + } + sb.WriteString(string(str)) + } + return sb.String(), nil +} + type clauseImpl func(ref.Val, string) (string, error) func clauseForType(argType ref.Type) (clauseImpl, error) { diff --git 
a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel index c6a620656bf6..b6d04e000317 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -32,7 +32,7 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter/functions:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", @@ -49,6 +49,7 @@ go_test( "attributes_test.go", "interpreter_test.go", "prune_test.go", + "runtimecost_test.go", ], embed = [ ":go_default_library", @@ -65,7 +66,7 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/anypb:go_default_library", ], diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go index 5c8107ab7cbe..1b19dc2b57b8 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -16,6 +16,7 @@ package interpreter import ( "fmt" + "strings" "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/types" @@ -231,7 +232,11 @@ type absoluteAttribute struct { // ID implements the Attribute interface method. func (a *absoluteAttribute) ID() int64 { - return a.id + qualCount := len(a.qualifiers) + if qualCount == 0 { + return a.id + } + return a.qualifiers[qualCount-1].ID() } // IsOptional returns trivially false for an attribute as the attribute represents a fully @@ -289,7 +294,11 @@ func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { return nil, err } if isOpt { - return types.OptionalOf(a.adapter.NativeToValue(obj)), nil + val := a.adapter.NativeToValue(obj) + if types.IsUnknown(val) { + return val, nil + } + return types.OptionalOf(val), nil } return obj, nil } @@ -301,7 +310,14 @@ func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { } } } - return nil, missingAttribute(a.String()) + var attrNames strings.Builder + for i, nm := range a.namespaceNames { + if i != 0 { + attrNames.WriteString(", ") + } + attrNames.WriteString(nm) + } + return nil, missingAttribute(attrNames.String()) } type conditionalAttribute struct { @@ -315,6 +331,11 @@ type conditionalAttribute struct { // ID is an implementation of the Attribute interface method. func (a *conditionalAttribute) ID() int64 { + // There's a field access after the conditional. + if a.truthy.ID() == a.falsy.ID() { + return a.truthy.ID() + } + // Otherwise return the conditional id as the consistent id being tracked. 
return a.id } @@ -379,7 +400,7 @@ type maybeAttribute struct { // ID is an implementation of the Attribute interface method. func (a *maybeAttribute) ID() int64 { - return a.id + return a.attrs[0].ID() } // IsOptional returns trivially false for an attribute as the attribute represents a fully @@ -436,7 +457,9 @@ func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) { } } // Next, ensure the most specific variable / type reference is searched first. - a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...) + if len(augmentedNames) != 0 { + a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...) + } return a, nil } @@ -494,7 +517,11 @@ type relativeAttribute struct { // ID is an implementation of the Attribute interface method. func (a *relativeAttribute) ID() int64 { - return a.id + qualCount := len(a.qualifiers) + if qualCount == 0 { + return a.id + } + return a.qualifiers[qualCount-1].ID() } // IsOptional returns trivially false for an attribute as the attribute represents a fully @@ -535,7 +562,11 @@ func (a *relativeAttribute) Resolve(vars Activation) (any, error) { return nil, err } if isOpt { - return types.OptionalOf(a.adapter.NativeToValue(obj)), nil + val := a.adapter.NativeToValue(obj) + if types.IsUnknown(val) { + return val, nil + } + return types.OptionalOf(val), nil } return obj, nil } @@ -1148,6 +1179,9 @@ func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, boo return nil, false, err } if !present { + // We return optional none here with a presence of 'false' as the layers + // above will attempt to call types.OptionalOf() on a present value if any + // of the qualifiers is optional. return types.OptionalNone, false, nil } } else { @@ -1200,6 +1234,8 @@ func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, pre return nil, false, v case traits.Mapper: val, found := v.Find(idx) + // If the index is of the wrong type for the map, then it is possible + // for the Find call to produce an error. if types.IsError(val) { return nil, false, val.(*types.Err) } @@ -1211,6 +1247,8 @@ func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, pre } return nil, false, missingKey(idx) case traits.Lister: + // If the index argument is not a valid numeric type, then it is possible + // for the index operation to produce an error. i, err := types.IndexOrError(idx) if err != nil { return nil, false, err @@ -1231,6 +1269,8 @@ func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, pre if types.IsError(presence) { return nil, false, presence.(*types.Err) } + // If not found or presence only test, then return. + // Otherwise, if found, obtain the value later on. 
if presenceOnly || presence == types.False { return nil, presence == types.True, nil } @@ -1288,7 +1328,7 @@ func (e *resolutionError) Error() string { return fmt.Sprintf("index out of bounds: %v", e.missingIndex) } if e.missingAttribute != "" { - return fmt.Sprintf("no such attribute: %s", e.missingAttribute) + return fmt.Sprintf("no such attribute(s): %s", e.missingAttribute) } return "invalid attribute" } @@ -1297,17 +1337,3 @@ func (e *resolutionError) Error() string { func (e *resolutionError) Is(err error) bool { return err.Error() == e.Error() } - -func findMin(x, y int64) int64 { - if x < y { - return x - } - return y -} - -func findMax(x, y int64) int64 { - if x > y { - return x - } - return y -} diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go index 840779175e31..32e2bcb7deaa 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -15,6 +15,8 @@ package interpreter import ( + "fmt" + "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" @@ -69,6 +71,7 @@ type InterpretableAttribute interface { // to whether the qualifier is present. QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) + // IsOptional indicates whether the resulting value is an optional type. IsOptional() bool // Resolve returns the value of the Attribute given the current Activation. @@ -108,10 +111,8 @@ type InterpretableConstructor interface { // Core Interpretable implementations used during the program planning phase. type evalTestOnly struct { - id int64 - attr InterpretableAttribute - qual Qualifier - field types.String + id int64 + InterpretableAttribute } // ID implements the Interpretable interface method. @@ -121,28 +122,55 @@ func (test *evalTestOnly) ID() int64 { // Eval implements the Interpretable interface method. func (test *evalTestOnly) Eval(ctx Activation) ref.Val { - val, err := test.attr.Resolve(ctx) + val, err := test.Resolve(ctx) + // Return an error if the resolve step fails if err != nil { - return types.NewErr(err.Error()) + return types.WrapErr(err) } - optVal, isOpt := val.(*types.Optional) - if isOpt { - if !optVal.HasValue() { - return types.False - } - val = optVal.GetValue() + if optVal, isOpt := val.(*types.Optional); isOpt { + return types.Bool(optVal.HasValue()) } - out, found, err := test.qual.QualifyIfPresent(ctx, val, true) + return test.Adapter().NativeToValue(val) +} + +// AddQualifier appends a qualifier that will always and only perform a presence test. +func (test *evalTestOnly) AddQualifier(q Qualifier) (Attribute, error) { + cq, ok := q.(ConstantQualifier) + if !ok { + return nil, fmt.Errorf("test only expressions must have constant qualifiers: %v", q) + } + return test.InterpretableAttribute.AddQualifier(&testOnlyQualifier{ConstantQualifier: cq}) +} + +type testOnlyQualifier struct { + ConstantQualifier +} + +// Qualify determines whether the test-only qualifier is present on the input object. 
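The test-only refactor above reduces `has()`-style expressions to an attribute whose qualifiers only ever presence-test. A sketch of the observable behavior through the public `cel` package (top-level API names assumed from this cel-go version; `msg` and its map type are illustrative):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("msg", cel.MapType(cel.StringType, cel.StringType)))
	if err != nil {
		panic(err)
	}
	// has(msg.field) compiles to a test-only select: it reports presence
	// without resolving the field's value.
	ast, iss := env.Compile(`has(msg.field)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"msg": map[string]string{"field": "set"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true: the key is present
}
```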
+func (q *testOnlyQualifier) Qualify(vars Activation, obj any) (any, error) { + out, present, err := q.ConstantQualifier.QualifyIfPresent(vars, obj, true) if err != nil { - return types.NewErr(err.Error()) + return nil, err } if unk, isUnk := out.(types.Unknown); isUnk { - return unk + return unk, nil } - if found { - return types.True + if opt, isOpt := out.(types.Optional); isOpt { + return opt.HasValue(), nil } - return types.False + return present, nil +} + +// QualifyIfPresent returns whether the target field in the test-only expression is present. +func (q *testOnlyQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + // Only ever test for presence. + return q.ConstantQualifier.QualifyIfPresent(vars, obj, true) +} + +// QualifierValueEquals determines whether the test-only constant qualifier equals the input value. +func (q *testOnlyQualifier) QualifierValueEquals(value any) bool { + // The input qualifier will always be of type string + return q.ConstantQualifier.Value().Value() == value } // NewConstValue creates a new constant valued Interpretable. @@ -802,6 +830,9 @@ func (e *evalSetMembership) ID() int64 { // Eval implements the Interpretable interface method. func (e *evalSetMembership) Eval(ctx Activation) ref.Val { val := e.arg.Eval(ctx) + if types.IsUnknownOrError(val) { + return val + } if ret, found := e.valueSet[val]; found { return ret } @@ -872,7 +903,7 @@ func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.ConstantQualifier.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.NewErr(err.Error()) + val = types.WrapErr(err) } else { val = e.adapter.NativeToValue(out) } @@ -880,6 +911,23 @@ func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) { return out, err } +// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. +func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly) + var val ref.Val + if err != nil { + val = types.WrapErr(err) + } else if out != nil { + val = e.adapter.NativeToValue(out) + } else if presenceOnly { + val = types.Bool(present) + } + if present || presenceOnly { + e.observer(e.ID(), e.ConstantQualifier, val) + } + return out, present, err +} + // QualifierValueEquals tests whether the incoming value is equal to the qualifying constant. func (e *evalWatchConstQual) QualifierValueEquals(value any) bool { qve, ok := e.ConstantQualifier.(qualifierValueEquator) @@ -898,7 +946,7 @@ func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.Qualifier.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.NewErr(err.Error()) + val = types.WrapErr(err) } else { val = e.adapter.NativeToValue(out) } @@ -906,6 +954,23 @@ func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) { return out, err } +// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. 
+func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly) + var val ref.Val + if err != nil { + val = types.WrapErr(err) + } else if out != nil { + val = e.adapter.NativeToValue(out) + } else if presenceOnly { + val = types.Bool(present) + } + if present || presenceOnly { + e.observer(e.ID(), e.Qualifier, val) + } + return out, present, err +} + // evalWatchConst describes a watcher of an instConst Interpretable. type evalWatchConst struct { InterpretableConst @@ -1025,12 +1090,12 @@ func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val { } if cBool { if tErr != nil { - return types.NewErr(tErr.Error()) + return types.WrapErr(tErr) } return cond.adapter.NativeToValue(tVal) } if fErr != nil { - return types.NewErr(fErr.Error()) + return types.WrapErr(fErr) } return cond.adapter.NativeToValue(fVal) } @@ -1042,6 +1107,8 @@ type evalAttr struct { optional bool } +var _ InterpretableAttribute = &evalAttr{} + // ID of the attribute instruction. func (a *evalAttr) ID() int64 { return a.attr.ID() @@ -1068,7 +1135,7 @@ func (a *evalAttr) Adapter() ref.TypeAdapter { func (a *evalAttr) Eval(ctx Activation) ref.Val { v, err := a.attr.Resolve(ctx) if err != nil { - return types.NewErr(err.Error()) + return types.WrapErr(err) } return a.adapter.NativeToValue(v) } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go index 9cf8e4e5c02f..0b65d0fa90d2 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/planner.go @@ -20,7 +20,6 @@ import ( "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter/functions" @@ -217,18 +216,14 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { if err != nil { return nil, err } - - // Return the test only eval expression. + // Modify the attribute to be test-only. if sel.GetTestOnly() { - return &evalTestOnly{ - id: expr.GetId(), - field: types.String(sel.GetField()), - attr: attr, - qual: qual, - }, nil + attr = &evalTestOnly{ + id: expr.GetId(), + InterpretableAttribute: attr, + } } - - // Otherwise, append the qualifier on the attribute. + // Append the qualifier on the attribute. _, err = attr.AddQualifier(qual) return attr, err } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go index b8b015a7a65b..d1b5d6bd6bf4 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/prune.go @@ -16,6 +16,7 @@ package interpreter import ( "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" @@ -67,11 +68,16 @@ type astPruner struct { // fold(and thus cache results of) some external calls, then they can prepare // the overloads accordingly. 
func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr { + pruneState := NewEvalState() + for _, id := range state.IDs() { + v, _ := state.Value(id) + pruneState.SetValue(id, v) + } pruner := &astPruner{ expr: expr, macroCalls: macroCalls, - state: state, - nextExprID: 1} + state: pruneState, + nextExprID: getMaxID(expr)} newExpr, _ := pruner.maybePrune(expr) return &exprpb.ParsedExpr{ Expr: newExpr, @@ -89,28 +95,50 @@ func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr { } func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) { - switch val.Type() { - case types.BoolType: + switch v := val.(type) { + case types.Bool: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: val.Value().(bool)}}), true - case types.IntType: + &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: bool(v)}}), true + case types.Bytes: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: val.Value().(int64)}}), true - case types.UintType: + &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: []byte(v)}}), true + case types.Double: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: val.Value().(uint64)}}), true - case types.StringType: + &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: float64(v)}}), true + case types.Duration: + p.state.SetValue(id, val) + durationString := string(v.ConvertToType(types.StringType).(types.String)) + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_CallExpr{ + CallExpr: &exprpb.Expr_Call{ + Function: overloads.TypeConvertDuration, + Args: []*exprpb.Expr{ + p.createLiteral(p.nextID(), + &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: durationString}}), + }, + }, + }, + }, true + case types.Int: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: val.Value().(string)}}), true - case types.DoubleType: + &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: int64(v)}}), true + case types.Uint: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: val.Value().(float64)}}), true - case types.BytesType: + &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: uint64(v)}}), true + case types.String: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: val.Value().([]byte)}}), true - case types.NullType: + &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: string(v)}}), true + case types.Null: + p.state.SetValue(id, val) return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: val.Value().(structpb.NullValue)}}), true + &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: v.Value().(structpb.NullValue)}}), true } // Attempt to build a list literal. 
@@ -128,6 +156,7 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo } elemExprs[i] = elemExpr } + p.state.SetValue(id, val) return &exprpb.Expr{ Id: id, ExprKind: &exprpb.Expr_ListExpr{ @@ -167,6 +196,7 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo entries[i] = entry i++ } + p.state.SetValue(id, val) return &exprpb.Expr{ Id: id, ExprKind: &exprpb.Expr_StructExpr{ @@ -182,75 +212,144 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo return nil, false } -func (p *astPruner) maybePruneAndOr(node *exprpb.Expr) (*exprpb.Expr, bool) { - if !p.existsWithUnknownValue(node.GetId()) { +func (p *astPruner) maybePruneOptional(elem *exprpb.Expr) (*exprpb.Expr, bool) { + elemVal, found := p.value(elem.GetId()) + if found && elemVal.Type() == types.OptionalType { + opt := elemVal.(*types.Optional) + if !opt.HasValue() { + return nil, true + } + if newElem, pruned := p.maybeCreateLiteral(elem.GetId(), opt.GetValue()); pruned { + return newElem, true + } + } + return elem, false +} + +func (p *astPruner) maybePruneIn(node *exprpb.Expr) (*exprpb.Expr, bool) { + // elem in list + call := node.GetCallExpr() + val, exists := p.maybeValue(call.GetArgs()[1].GetId()) + if !exists { return nil, false } + if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero { + return p.maybeCreateLiteral(node.GetId(), types.False) + } + return nil, false +} + +func (p *astPruner) maybePruneLogicalNot(node *exprpb.Expr) (*exprpb.Expr, bool) { + call := node.GetCallExpr() + arg := call.GetArgs()[0] + val, exists := p.maybeValue(arg.GetId()) + if !exists { + return nil, false + } + if b, ok := val.(types.Bool); ok { + return p.maybeCreateLiteral(node.GetId(), !b) + } + return nil, false +} +func (p *astPruner) maybePruneOr(node *exprpb.Expr) (*exprpb.Expr, bool) { call := node.GetCallExpr() // We know result is unknown, so we have at least one unknown arg // and if one side is a known value, we know we can ignore it. - if p.existsWithKnownValue(call.Args[0].GetId()) { - return call.Args[1], true + if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists { + if v == types.True { + return p.maybeCreateLiteral(node.GetId(), types.True) + } + return call.GetArgs()[1], true } - if p.existsWithKnownValue(call.Args[1].GetId()) { - return call.Args[0], true + if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists { + if v == types.True { + return p.maybeCreateLiteral(node.GetId(), types.True) + } + return call.GetArgs()[0], true } return nil, false } -func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) { - if !p.existsWithUnknownValue(node.GetId()) { - return nil, false +func (p *astPruner) maybePruneAnd(node *exprpb.Expr) (*exprpb.Expr, bool) { + call := node.GetCallExpr() + // We know result is unknown, so we have at least one unknown arg + // and if one side is a known value, we know we can ignore it. 
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists { + if v == types.False { + return p.maybeCreateLiteral(node.GetId(), types.False) + } + return call.GetArgs()[1], true } + if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists { + if v == types.False { + return p.maybeCreateLiteral(node.GetId(), types.False) + } + return call.GetArgs()[0], true + } + return nil, false +} +func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) { call := node.GetCallExpr() - condVal, condValueExists := p.value(call.Args[0].GetId()) - if !condValueExists || types.IsUnknownOrError(condVal) { + cond, exists := p.maybeValue(call.GetArgs()[0].GetId()) + if !exists { return nil, false } - - if condVal.Value().(bool) { - return call.Args[1], true + if cond.Value().(bool) { + return call.GetArgs()[1], true } - return call.Args[2], true + return call.GetArgs()[2], true } func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) { + if _, exists := p.value(node.GetId()); !exists { + return nil, false + } call := node.GetCallExpr() - if call.Function == operators.LogicalOr || call.Function == operators.LogicalAnd { - return p.maybePruneAndOr(node) + if call.Function == operators.LogicalOr { + return p.maybePruneOr(node) + } + if call.Function == operators.LogicalAnd { + return p.maybePruneAnd(node) } if call.Function == operators.Conditional { return p.maybePruneConditional(node) } - + if call.Function == operators.In { + return p.maybePruneIn(node) + } + if call.Function == operators.LogicalNot { + return p.maybePruneLogicalNot(node) + } return nil, false } func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) { - out, pruned := p.prune(node) - if pruned { - delete(p.macroCalls, node.GetId()) - } - return out, pruned + return p.prune(node) } func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { if node == nil { return node, false } - val, valueExists := p.value(node.GetId()) - if valueExists && !types.IsUnknownOrError(val) { + val, valueExists := p.maybeValue(node.GetId()) + if valueExists { if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok { + delete(p.macroCalls, node.GetId()) return newNode, true } } + if macro, found := p.macroCalls[node.GetId()]; found { + // prune the expression in terms of the macro call instead of the expanded form. + if newMacro, pruned := p.prune(macro); pruned { + p.macroCalls[node.GetId()] = newMacro + } + } // We have either an unknown/error value, or something we don't want to // transform, or expression was not evaluated. If possible, drill down // more. 
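These pruning changes are exercised through partial evaluation: evaluate with some variables unknown, then fold the recorded values into a residual AST. A hedged end-to-end sketch using the top-level cel-go API (`PartialVars`, `AttributePattern`, and `ResidualAst` assumed as exported in this version):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.BoolType),
		cel.Variable("y", cel.BoolType),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`x && y`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Track state and enable partial evaluation so pruning has values to fold.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
	if err != nil {
		panic(err)
	}
	// x is known, y is declared unknown.
	vars, err := cel.PartialVars(map[string]any{"x": true}, cel.AttributePattern("y"))
	if err != nil {
		panic(err)
	}
	_, details, err := prg.Eval(vars)
	if err != nil {
		panic(err)
	}
	residual, err := env.ResidualAst(ast, details)
	if err != nil {
		panic(err)
	}
	src, _ := cel.AstToString(residual)
	fmt.Println(src) // with x folded away, only y remains
}
```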
- switch node.GetExprKind().(type) { case *exprpb.Expr_SelectExpr: if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned { @@ -266,10 +365,6 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { }, true } case *exprpb.Expr_CallExpr: - if newExpr, pruned := p.maybePruneFunction(node); pruned { - newExpr, _ = p.maybePrune(newExpr) - return newExpr, true - } var prunedCall bool call := node.GetCallExpr() args := call.GetArgs() @@ -290,31 +385,66 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { prunedCall = true newCall.Target = newTarget } + newNode := &exprpb.Expr{ + Id: node.GetId(), + ExprKind: &exprpb.Expr_CallExpr{ + CallExpr: newCall, + }, + } + if newExpr, pruned := p.maybePruneFunction(newNode); pruned { + newExpr, _ = p.maybePrune(newExpr) + return newExpr, true + } if prunedCall { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: newCall, - }, - }, true + return newNode, true } case *exprpb.Expr_ListExpr: elems := node.GetListExpr().GetElements() - newElems := make([]*exprpb.Expr, len(elems)) + optIndices := node.GetListExpr().GetOptionalIndices() + optIndexMap := map[int32]bool{} + for _, i := range optIndices { + optIndexMap[i] = true + } + newOptIndexMap := make(map[int32]bool, len(optIndexMap)) + newElems := make([]*exprpb.Expr, 0, len(elems)) var prunedList bool + + prunedIdx := 0 for i, elem := range elems { - newElems[i] = elem + _, isOpt := optIndexMap[int32(i)] + if isOpt { + newElem, pruned := p.maybePruneOptional(elem) + if pruned { + prunedList = true + if newElem != nil { + newElems = append(newElems, newElem) + prunedIdx++ + } + continue + } + newOptIndexMap[int32(prunedIdx)] = true + } if newElem, prunedElem := p.maybePrune(elem); prunedElem { - newElems[i] = newElem + newElems = append(newElems, newElem) prunedList = true + } else { + newElems = append(newElems, elem) } + prunedIdx++ + } + optIndices = make([]int32, len(newOptIndexMap)) + idx := 0 + for i := range newOptIndexMap { + optIndices[idx] = i + idx++ } if prunedList { return &exprpb.Expr{ Id: node.GetId(), ExprKind: &exprpb.Expr_ListExpr{ ListExpr: &exprpb.Expr_CreateList{ - Elements: newElems, + Elements: newElems, + OptionalIndices: optIndices, }, }, }, true @@ -344,6 +474,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { MapKey: newKey, } } + newEntry.OptionalEntry = entry.GetOptionalEntry() newEntries[i] = newEntry } if prunedStruct { @@ -357,27 +488,6 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { }, }, true } - case *exprpb.Expr_ComprehensionExpr: - compre := node.GetComprehensionExpr() - // Only the range of the comprehension is pruned since the state tracking only records - // the last iteration of the comprehension and not each step in the evaluation which - // means that the any residuals computed in between might be inaccurate. 
- if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterVar: compre.GetIterVar(), - IterRange: newRange, - AccuVar: compre.GetAccuVar(), - AccuInit: compre.GetAccuInit(), - LoopCondition: compre.GetLoopCondition(), - LoopStep: compre.GetLoopStep(), - Result: compre.GetResult(), - }, - }, - }, true - } } return node, false } @@ -387,24 +497,82 @@ func (p *astPruner) value(id int64) (ref.Val, bool) { return val, (found && val != nil) } -func (p *astPruner) existsWithUnknownValue(id int64) bool { - val, valueExists := p.value(id) - return valueExists && types.IsUnknown(val) +func (p *astPruner) maybeValue(id int64) (ref.Val, bool) { + val, found := p.value(id) + if !found || types.IsUnknownOrError(val) { + return nil, false + } + return val, true } -func (p *astPruner) existsWithKnownValue(id int64) bool { - val, valueExists := p.value(id) - return valueExists && !types.IsUnknown(val) +func (p *astPruner) nextID() int64 { + next := p.nextExprID + p.nextExprID++ + return next } -func (p *astPruner) nextID() int64 { - for { - _, found := p.state.Value(p.nextExprID) - if !found { - next := p.nextExprID - p.nextExprID++ - return next +type astVisitor struct { + // visitEntry is called on every expr node, including those within a map/struct entry. + visitExpr func(expr *exprpb.Expr) + // visitEntry is called before entering the key, value of a map/struct entry. + visitEntry func(entry *exprpb.Expr_CreateStruct_Entry) +} + +func getMaxID(expr *exprpb.Expr) int64 { + maxID := int64(1) + visit(expr, maxIDVisitor(&maxID)) + return maxID +} + +func maxIDVisitor(maxID *int64) astVisitor { + return astVisitor{ + visitExpr: func(e *exprpb.Expr) { + if e.GetId() >= *maxID { + *maxID = e.GetId() + 1 + } + }, + visitEntry: func(e *exprpb.Expr_CreateStruct_Entry) { + if e.GetId() >= *maxID { + *maxID = e.GetId() + 1 + } + }, + } +} + +func visit(expr *exprpb.Expr, visitor astVisitor) { + exprs := []*exprpb.Expr{expr} + for len(exprs) != 0 { + e := exprs[0] + visitor.visitExpr(e) + exprs = exprs[1:] + switch e.GetExprKind().(type) { + case *exprpb.Expr_SelectExpr: + exprs = append(exprs, e.GetSelectExpr().GetOperand()) + case *exprpb.Expr_CallExpr: + call := e.GetCallExpr() + if call.GetTarget() != nil { + exprs = append(exprs, call.GetTarget()) + } + exprs = append(exprs, call.GetArgs()...) + case *exprpb.Expr_ComprehensionExpr: + compre := e.GetComprehensionExpr() + exprs = append(exprs, + compre.GetIterRange(), + compre.GetAccuInit(), + compre.GetLoopCondition(), + compre.GetLoopStep(), + compre.GetResult()) + case *exprpb.Expr_ListExpr: + list := e.GetListExpr() + exprs = append(exprs, list.GetElements()...) 
+ case *exprpb.Expr_StructExpr: + for _, entry := range e.GetStructExpr().GetEntries() { + visitor.visitEntry(entry) + if entry.GetMapKey() != nil { + exprs = append(exprs, entry.GetMapKey()) + } + exprs = append(exprs, entry.GetValue()) + } } - p.nextExprID++ } } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go index e7daf011fc71..80e7f6134496 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/interpreter/runtimecost.go @@ -53,6 +53,11 @@ func CostObserver(tracker *CostTracker) EvalObserver { tracker.stack.drop(t.Attr().ID()) tracker.cost += common.SelectAndIdentCost } + if !tracker.presenceTestHasCost { + if _, isTestOnly := programStep.(*evalTestOnly); isTestOnly { + tracker.cost -= common.SelectAndIdentCost + } + } case *evalExhaustiveConditional: // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions. tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID()) @@ -69,8 +74,6 @@ func CostObserver(tracker *CostTracker) EvalObserver { tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) case *evalFold: tracker.stack.drop(t.iterRange.ID()) - case *evalTestOnly: - tracker.cost += common.SelectAndIdentCost case Qualifier: tracker.cost++ case InterpretableCall: @@ -97,21 +100,58 @@ func CostObserver(tracker *CostTracker) EvalObserver { return observer } -// CostTracker represents the information needed for tacking runtime cost +// CostTrackerOption configures the behavior of CostTracker objects. +type CostTrackerOption func(*CostTracker) error + +// CostTrackerLimit sets the runtime limit on the evaluation cost during execution and will terminate the expression +// evaluation if the limit is exceeded. +func CostTrackerLimit(limit uint64) CostTrackerOption { + return func(tracker *CostTracker) error { + tracker.Limit = &limit + return nil + } +} + +// PresenceTestHasCost determines whether presence testing has a cost of one or zero. +// Defaults to presence test has a cost of one. +func PresenceTestHasCost(hasCost bool) CostTrackerOption { + return func(tracker *CostTracker) error { + tracker.presenceTestHasCost = hasCost + return nil + } +} + +// NewCostTracker creates a new CostTracker with a given estimator and a set of functional CostTrackerOption values. +func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) { + tracker := &CostTracker{ + Estimator: estimator, + presenceTestHasCost: true, + } + for _, opt := range opts { + err := opt(tracker) + if err != nil { + return nil, err + } + } + return tracker, nil +} + +// CostTracker represents the information needed for tracking runtime cost. 
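With `NewCostTracker` and its functional options above, callers configure limits at construction time instead of mutating exported fields. A usage sketch grounded in the signatures added here (a nil estimator keeps the built-in per-step costs):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/interpreter"
)

func main() {
	// Cap runtime cost at 1000 units and make presence tests free.
	tracker, err := interpreter.NewCostTracker(
		nil, // no custom ActualCostEstimator
		interpreter.CostTrackerLimit(1000),
		interpreter.PresenceTestHasCost(false),
	)
	if err != nil {
		panic(err)
	}
	// CostObserver decorates evaluation; after an Eval, the accumulated
	// cost is read back from ActualCost().
	_ = interpreter.CostObserver(tracker)
	fmt.Println(tracker.ActualCost()) // 0 before any evaluation
}
```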
type CostTracker struct { - Estimator ActualCostEstimator - Limit *uint64 + Estimator ActualCostEstimator + Limit *uint64 + presenceTestHasCost bool cost uint64 stack refValStack } // ActualCost returns the runtime cost -func (c CostTracker) ActualCost() uint64 { +func (c *CostTracker) ActualCost() uint64 { return c.cost } -func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 { +func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 { var cost uint64 if c.Estimator != nil { callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), argValues, result) @@ -181,7 +221,7 @@ func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resul } // actualSize returns the size of value -func (c CostTracker) actualSize(value ref.Val) uint64 { +func (c *CostTracker) actualSize(value ref.Val) uint64 { if sz, ok := value.(traits.Sizer); ok { return uint64(sz.Size().(types.Int)) } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel index b5c15fa570da..67ecc9554397 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/BUILD.bazel @@ -23,8 +23,8 @@ go_library( "//common/operators:go_default_library", "//common/runes:go_default_library", "//parser/gen:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library", - "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library", + "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", ], @@ -46,7 +46,7 @@ go_test( "//common/debug:go_default_library", "//parser/gen:go_default_library", "//test:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library", + "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//testing/protocmp:go_default_library", ], diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel index 22711310ce0f..654d1de7aad3 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -21,6 +21,6 @@ go_library( ], importpath = "github.com/google/cel-go/parser/gen", deps = [ - "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library", + "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/helper.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/helper.go index 52d7fd8bbeb4..8f8f478ed12e 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/helper.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/helper.go @@ -259,7 +259,8 @@ func (p *parserHelper) buildMacroCallArg(expr *exprpb.Expr) *exprpb.Expr { Id: expr.GetId(), ExprKind: &exprpb.Expr_ListExpr{ ListExpr: &exprpb.Expr_CreateList{ - Elements: 
macroListArgs, + Elements: macroListArgs, + OptionalIndices: listExpr.GetOptionalIndices(), }, }, } diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go index 8bfdae55b919..674c697c5cd7 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/options.go @@ -18,6 +18,7 @@ import "fmt" type options struct { maxRecursionDepth int + errorReportingLimit int errorRecoveryTokenLookaheadLimit int errorRecoveryLimit int expressionSizeCodePointLimit int @@ -46,7 +47,7 @@ func MaxRecursionDepth(limit int) Option { // successfully resume. In some pathological cases, the parser can look through quite a large set of input which // in turn generates a lot of back-tracking and performance degredation. // -// The limit must be > 1, and is recommended to be less than the default of 256. +// The limit must be >= 1, and is recommended to be less than the default of 256. func ErrorRecoveryLookaheadTokenLimit(limit int) Option { return func(opts *options) error { if limit < 1 { @@ -68,6 +69,19 @@ func ErrorRecoveryLimit(limit int) Option { } } +// ErrorReportingLimit limits the number of syntax error reports before terminating parsing. +// +// The limit must be at least 1. If unset, the limit will be 100. +func ErrorReportingLimit(limit int) Option { + return func(opts *options) error { + if limit < 1 { + return fmt.Errorf("error reporting limit must be at least 1: %d", limit) + } + opts.errorReportingLimit = limit + return nil + } +} + // ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an // expression. func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option { diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go index da4481776f18..e6f70f9060e0 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/parser.go @@ -47,6 +47,9 @@ func NewParser(opts ...Option) (*Parser, error) { return nil, err } } + if p.errorReportingLimit == 0 { + p.errorReportingLimit = 100 + } if p.maxRecursionDepth == 0 { p.maxRecursionDepth = 250 } @@ -91,6 +94,7 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors helper: newParserHelper(source), macros: p.macros, maxRecursionDepth: p.maxRecursionDepth, + errorReportingLimit: p.errorReportingLimit, errorRecoveryLimit: p.errorRecoveryLimit, errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit, populateMacroCalls: p.populateMacroCalls, @@ -200,6 +204,16 @@ func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) { var _ antlr.ParseTreeListener = &recursionListener{} +type tooManyErrors struct { + errorReportingLimit int +} + +func (t *tooManyErrors) Error() string { + return fmt.Sprintf("More than %d syntax errors", t.errorReportingLimit) +} + +var _ error = &tooManyErrors{} + type recoveryLimitError struct { message string } @@ -274,7 +288,9 @@ type parser struct { helper *parserHelper macros map[string]Macro recursionDepth int + errorReports int maxRecursionDepth int + errorReportingLimit int errorRecoveryLimit int errorRecoveryLookaheadTokenLimit int populateMacroCalls bool @@ -306,14 +322,14 @@ func (p *parser) parse(expr runes.Buffer, 
desc string) *exprpb.Expr { lexer := lexerPool.Get().(*gen.CELLexer) prsr := parserPool.Get().(*gen.CELParser) - // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners, so this is - // good enough until that is exported. prsrListener := &recursionListener{ maxDepth: p.maxRecursionDepth, ruleTypeDepth: map[int]*int{}, } defer func() { + // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners, + // so this is good enough until that is exported. // Reset the lexer and parser before putting them back in the pool. lexer.RemoveErrorListeners() prsr.RemoveParseListener(prsrListener) @@ -344,6 +360,8 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { p.errors.ReportError(common.NoLocation, err.Error()) case *recursionError: p.errors.ReportError(common.NoLocation, err.Error()) + case *tooManyErrors: + // do nothing case *recoveryLimitError: // do nothing, listeners already notified and error reported. default: @@ -866,7 +884,6 @@ func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr { // ANTLR Parse listener implementations func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { - // TODO: Snippet l := p.helper.source.NewLocation(line, column) // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error @@ -874,7 +891,16 @@ func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, l if strings.Contains(msg, "no viable alternative") { msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier) } - p.errors.syntaxError(l, msg) + // Ensure that no more than 100 syntax errors are reported as this will halt attempts to recover from a + // seriously broken expression. 
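`ErrorReportingLimit` (default 100) caps how many syntax errors a badly broken input may report; the `tooManyErrors` panic above is recovered in `parse` and reported as a final marker error. A hedged usage sketch with the low-level `parser` package (`common.NewTextSource` assumed from cel-go's `common` package; the input is deliberately malformed):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	// Report at most 5 syntax errors instead of the default 100.
	p, err := parser.NewParser(parser.ErrorReportingLimit(5))
	if err != nil {
		panic(err)
	}
	_, errs := p.Parse(common.NewTextSource(`1 + + + + + + + + +`))
	// Once the limit trips, the error list ends with the
	// "More than 5 syntax errors" marker and parsing halts.
	fmt.Println(len(errs.GetErrors()))
}
```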
+ if p.errorReports < p.errorReportingLimit { + p.errorReports++ + p.errors.syntaxError(l, msg) + } else { + tme := &tooManyErrors{errorReportingLimit: p.errorReportingLimit} + p.errors.syntaxError(l, tme.Error()) + panic(tme) + } } func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) { diff --git a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go index 5ff979aa6009..c3c40a0dd36a 100644 --- a/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go +++ b/cluster-autoscaler/vendor/github.com/google/cel-go/parser/unparser.go @@ -276,6 +276,9 @@ func (un *unparser) visitConst(expr *exprpb.Expr) error { // represent the float using the minimum required digits d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64) un.str.WriteString(d) + if !strings.Contains(d, ".") { + un.str.WriteString(".0") + } case *exprpb.Constant_Int64Value: i := strconv.FormatInt(c.GetInt64Value(), 10) un.str.WriteString(i) diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/LICENSE b/cluster-autoscaler/vendor/github.com/google/gnostic-models/LICENSE similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/LICENSE rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/LICENSE diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/README.md b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/README.md similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/README.md rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/README.md diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/context.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/context.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/context.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/context.go diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/error.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/error.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/error.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/error.go diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/extensions.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/extensions.go similarity index 97% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/extensions.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/extensions.go index 5b5a916d2ece..250c81e8c854 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/extensions.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -24,7 +24,7 @@ import ( "github.com/golang/protobuf/ptypes/any" yaml "gopkg.in/yaml.v3" - extensions "github.com/google/gnostic/extensions" + extensions "github.com/google/gnostic-models/extensions" ) // ExtensionHandler 
describes a binary that is called by the compiler to handle specification extensions. diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/helpers.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/helpers.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/helpers.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/helpers.go index 97ffaa5131ae..975d65e8f8de 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/helpers.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -22,7 +22,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/jsonschema" + "github.com/google/gnostic-models/jsonschema" ) // compiler helper functions, usually called from generated code diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/main.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/main.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/main.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/main.go diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/compiler/reader.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/reader.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/compiler/reader.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/compiler/reader.go diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/extensions/README.md b/cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/README.md similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/extensions/README.md rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/README.md diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extension.pb.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extension.pb.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extension.pb.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extension.pb.go index a6a4ccca6cf6..a71df8abecc6 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extension.pb.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extension.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: extensions/extension.proto package gnostic_extension_v1 diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extension.proto b/cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extension.proto similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extension.proto rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extension.proto diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extensions.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extensions.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/extensions/extensions.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/extensions/extensions.go diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/README.md b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/README.md similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/README.md rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/README.md diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/base.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/base.go similarity index 90% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/base.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/base.go index 0af8b148b9c0..5fcc4885a03c 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/base.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/base.go @@ -1,3 +1,16 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // THIS FILE IS AUTOMATICALLY GENERATED. 
@@ -81,4 +94,4 @@ YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 -c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/display.go similarity index 92% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/display.go index 8677ed49a0ea..028a760a91b5 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/display.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/display.go @@ -46,23 +46,8 @@ func (schema *Schema) describeSchema(indent string) string { if schema.Schema != nil { result += indent + "$schema: " + *(schema.Schema) + "\n" } - if schema.ReadOnly != nil && *schema.ReadOnly { - result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly)) - } - if schema.WriteOnly != nil && *schema.WriteOnly { - result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly)) - } if schema.ID != nil { - switch strings.TrimSuffix(*schema.Schema, "#") { - case "http://json-schema.org/draft-04/schema#": - fallthrough - case "#": - fallthrough - case "": - result += indent + "id: " + *(schema.ID) + "\n" - default: - result += indent + "$id: " + *(schema.ID) + "\n" - } + result += indent + "id: " + *(schema.ID) + "\n" } if schema.MultipleOf != nil { result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/models.go similarity index 97% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/models.go index 0d877249abb1..4781bdc5f500 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/models.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/models.go @@ -23,11 +23,9 @@ import "gopkg.in/yaml.v3" // All fields are pointers and are nil if the associated values // are not specified. type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers - ReadOnly *bool - WriteOnly *bool + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers // http://json-schema.org/latest/json-schema-validation.html // 5.1. 
Validation keywords for numeric instances (number and integer) diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/operations.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/operations.go similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/operations.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/operations.go diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/reader.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/reader.go index a909a34128b7..b8583d466023 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/reader.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/reader.go @@ -165,6 +165,7 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema { default: fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) + return nil } return nil diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/schema.json b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/schema.json similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/schema.json rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/schema.json diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/writer.go similarity index 92% rename from cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/writer.go index 15b1f905067a..340dc5f93306 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/jsonschema/writer.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/jsonschema/writer.go @@ -16,7 +16,6 @@ package jsonschema import ( "fmt" - "strings" "gopkg.in/yaml.v3" ) @@ -34,11 +33,7 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) { value := node.Content[i+1] switch value.Kind { case yaml.ScalarNode: - if value.Tag == "!!bool" { - result += value.Value - } else { - result += "\"" + value.Value + "\"" - } + result += "\"" + value.Value + "\"" case yaml.MappingNode: result += renderMappingNode(value, innerIndent) case yaml.SequenceNode: @@ -63,11 +58,7 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) { item := node.Content[i] switch item.Kind { case yaml.ScalarNode: - if item.Tag == "!!bool" { - result += innerIndent + item.Value - } else { - result += innerIndent + "\"" + item.Value + "\"" - } + result += innerIndent + "\"" + item.Value + "\"" case yaml.MappingNode: result += innerIndent + renderMappingNode(item, innerIndent) + "" default: @@ -269,26 +260,11 @@ func (schema *Schema) nodeValue() *yaml.Node { content = appendPair(content, "title", nodeForString(*schema.Title)) } if schema.ID != nil { - switch strings.TrimSuffix(*schema.Schema, "#") { - case "http://json-schema.org/draft-04/schema": - fallthrough - case "#": - fallthrough - case "": - content = appendPair(content, 
"id", nodeForString(*schema.ID)) - default: - content = appendPair(content, "$id", nodeForString(*schema.ID)) - } + content = appendPair(content, "id", nodeForString(*schema.ID)) } if schema.Schema != nil { content = appendPair(content, "$schema", nodeForString(*schema.Schema)) } - if schema.ReadOnly != nil && *schema.ReadOnly { - content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly)) - } - if schema.WriteOnly != nil && *schema.WriteOnly { - content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly)) - } if schema.Type != nil { content = appendPair(content, "type", schema.Type.nodeValue()) } diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go index 28c2777d5115..d71fe6d5451e 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). @@ -7887,12 +7887,7 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) - } - } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} return info } diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go index 06b60157c144..65c4c913ce70 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: openapiv2/OpenAPIv2.proto package openapi_v2 diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/README.md b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/README.md similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/README.md rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/README.md diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/document.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/document.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/document.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/document.go index 0021ae871a6f..e96ac0d6dac2 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/document.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -17,7 +17,7 @@ package openapi_v2 import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json similarity index 100% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index d54a84db7c0e..4b1131ce1c2e 100644 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). 
@@ -8560,12 +8560,7 @@ func (m *Strings) ToRawInfo() *yaml.Node {
 	if m == nil {
 		return info
 	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value))
-		}
-	}
+	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
 	return info
 }
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
index 90a56f5526b2..945b8d11ff59 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
@@ -16,8 +16,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.28.0
-// protoc v3.19.4
+// protoc-gen-go v1.27.1
+// protoc v3.19.3
 // source: openapiv3/OpenAPIv3.proto

 package openapi_v3
@@ -6760,13 +6760,12 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{
 	0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
 	0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61,
 	0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e,
 	0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33,
 	0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
-	0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
-	0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33,
-	0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }

 var (
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto
similarity index 99%
rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto
index 7aede5ed9091..1be335b89ba0 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto
@@ -42,7 +42,7 @@ option java_package = "org.openapi_v3";
 option objc_class_prefix = "OAS";

 // The Go package name.
-option go_package = "github.com/google/gnostic/openapiv3;openapi_v3";
+option go_package = "./openapiv3;openapi_v3";

 message AdditionalPropertiesItem {
   oneof oneof {
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/README.md
similarity index 89%
rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md
rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/README.md
index 83603b82aab7..5ee12d92e24e 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/README.md
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/README.md
@@ -19,7 +19,3 @@ for OpenAPI.

 The schema-generator directory contains support code which generates
 openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown).
-
-### How to rebuild
-
-`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto`
\ No newline at end of file
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/document.go b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/document.go
similarity index 96%
rename from cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/document.go
rename to cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/document.go
index ef10d1d90964..1cee4677350e 100644
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/document.go
+++ b/cluster-autoscaler/vendor/github.com/google/gnostic-models/openapiv3/document.go
@@ -17,7 +17,7 @@ package openapi_v3
 import (
 	"gopkg.in/yaml.v3"

-	"github.com/google/gnostic/compiler"
+	"github.com/google/gnostic-models/compiler"
 )

 // ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation.
diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go
deleted file mode 100644
index ae242f304315..000000000000
--- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2022 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.0
-// protoc v3.19.4
-// source: openapiv3/annotations.proto
-
-package openapi_v3
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
-	reflect "reflect"
-)
-
-const (
-	// Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FileOptions)(nil), - ExtensionType: (*Document)(nil), - Field: 1143, - Name: "openapi.v3.document", - Tag: "bytes,1143,opt,name=document", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*Operation)(nil), - Field: 1143, - Name: "openapi.v3.operation", - Tag: "bytes,1143,opt,name=operation", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.MessageOptions)(nil), - ExtensionType: (*Schema)(nil), - Field: 1143, - Name: "openapi.v3.schema", - Tag: "bytes,1143,opt,name=schema", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: (*Schema)(nil), - Field: 1143, - Name: "openapi.v3.property", - Tag: "bytes,1143,opt,name=property", - Filename: "openapiv3/annotations.proto", - }, -} - -// Extension fields to descriptorpb.FileOptions. -var ( - // optional openapi.v3.Document document = 1143; - E_Document = &file_openapiv3_annotations_proto_extTypes[0] -) - -// Extension fields to descriptorpb.MethodOptions. -var ( - // optional openapi.v3.Operation operation = 1143; - E_Operation = &file_openapiv3_annotations_proto_extTypes[1] -) - -// Extension fields to descriptorpb.MessageOptions. -var ( - // optional openapi.v3.Schema schema = 1143; - E_Schema = &file_openapiv3_annotations_proto_extTypes[2] -) - -// Extension fields to descriptorpb.FieldOptions. -var ( - // optional openapi.v3.Schema property = 1143; - E_Property = &file_openapiv3_annotations_proto_extTypes[3] -) - -var File_openapiv3_annotations_proto protoreflect.FileDescriptor - -var file_openapiv3_annotations_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 
0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, - 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, - 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, - 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_openapiv3_annotations_proto_goTypes = []interface{}{ - (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions - (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions - (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions - (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions - (*Document)(nil), // 4: openapi.v3.Document - (*Operation)(nil), // 5: openapi.v3.Operation - (*Schema)(nil), // 6: openapi.v3.Schema -} -var file_openapiv3_annotations_proto_depIdxs = []int32{ - 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions - 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions - 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions - 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions - 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document - 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation - 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema - 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 4, // [4:8] is the sub-list for extension type_name - 0, // [0:4] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_openapiv3_annotations_proto_init() } -func file_openapiv3_annotations_proto_init() { - if File_openapiv3_annotations_proto != nil { - return - } - file_openapiv3_OpenAPIv3_proto_init() - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv3_annotations_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 4, - NumServices: 0, - }, - GoTypes: file_openapiv3_annotations_proto_goTypes, - 
DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, - ExtensionInfos: file_openapiv3_annotations_proto_extTypes, - }.Build() - File_openapiv3_annotations_proto = out.File - file_openapiv3_annotations_proto_rawDesc = nil - file_openapiv3_annotations_proto_goTypes = nil - file_openapiv3_annotations_proto_depIdxs = nil -} diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.proto deleted file mode 100644 index 0bd87810db60..000000000000 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/annotations.proto +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package openapi.v3; - -import "openapiv3/OpenAPIv3.proto"; -import "google/protobuf/descriptor.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "AnnotationsProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v3"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. 
-option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; - -extend google.protobuf.FileOptions { - Document document = 1143; -} - -extend google.protobuf.MethodOptions { - Operation operation = 1143; -} - -extend google.protobuf.MessageOptions { - Schema schema = 1143; -} - -extend google.protobuf.FieldOptions { - Schema property = 1143; -} \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json deleted file mode 100644 index d5caed162d8f..000000000000 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json +++ /dev/null @@ -1,1251 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anysOrExpressions" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. 
When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": false, - "patternProperties": { - "^[a-zA-Z0-9\\.\\-_]+$": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "anysOrExpressions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/anyOrExpression" - } - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "object": { - "type": "object", - "additionalProperties": 
true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json b/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json deleted file mode 100644 index ed0b83adf4dc..000000000000 --- a/cluster-autoscaler/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json +++ /dev/null @@ -1,1250 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - }, - "summary": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anyOrExpression" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated by the OAuth 2.0 Security Best Current Practice. Recommended for most use cases is the Authorization Code Grant flow with PKCE.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object.
Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": 
"string" - } - }, - "object": { - "type": "object", - "additionalProperties": true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c758234904ec..000000000000 --- a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. 
-* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/LICENSE b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51e0d..000000000000 --- a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/README.md b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9f94..000000000000 --- a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. 
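
For orientation, a minimal decoding sketch (an editor's illustration, not taken from the original README; `Person` and the input map are invented, and `github.com/mitchellh/mapstructure` is the package's canonical import path):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
	Age  int
}

func main() {
	// Input of unknown shape, e.g. freshly unmarshaled JSON.
	input := map[string]interface{}{
		"name": "Mitchell",
		"age":  91,
	}

	var p Person
	// Decode matches map keys to struct fields case-insensitively.
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:Mitchell Age:91}
}
```
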
- -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca72484..000000000000 --- a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. 
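// For illustration only (an editor's sketch, not part of the original source):
// composed hooks run left to right, each receiving the previous hook's output.
// From a client package a chain might be wired up like this, where "cfg" is an
// assumed target struct:
//
//	hook := mapstructure.ComposeDecodeHookFunc(
//		mapstructure.StringToTimeDurationHookFunc(), // "5s" -> time.Duration
//		mapstructure.StringToSliceHookFunc(","),     // "a,b" -> []string{"a", "b"}
//	)
//	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
//		DecodeHook: hook,
//		Result:     &cfg,
//	})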
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. -func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
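// For illustration only (an editor's sketch, not part of the original source):
// WeaklyTypedHook is an ordinary DecodeHookFunc, so it is installed through
// DecoderConfig rather than the WeaklyTypedInput flag, and per the switch in
// its body it only performs weak conversions into string targets. Here "out"
// is an assumed target struct with string fields:
//
//	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
//		DecodeHook: mapstructure.WeaklyTypedHook, // bools/numbers/[]byte -> string
//		Result:     &out,
//	})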
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/error.go b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af3f1..000000000000 --- a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represent multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...)
- default: - return append(errors, e.Error()) - } -} diff --git a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac3610..000000000000 --- a/cluster-autoscaler/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". 
-// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. -// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. 
The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. 
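// For illustration only (an editor's sketch, not part of the original source):
// Metadata is typically filled in through DecodeMetadata, e.g. to surface
// unknown configuration keys. Here "raw" and "cfg" are assumed inputs:
//
//	var md mapstructure.Metadata
//	if err := mapstructure.DecodeMetadata(raw, &cfg, &md); err != nil {
//		return err
//	}
//	if len(md.Unused) > 0 {
//		log.Printf("ignoring unknown keys: %v", md.Unused)
//	}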
-type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. -func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. 
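// For illustration only (an editor's sketch, not part of the original source):
// the exported entry points above all funnel into the decode method below via
// Decoder.Decode. Callers needing non-default behaviour, such as a custom tag
// name, build the Decoder explicitly ("cfg" and "raw" are assumed):
//
//	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
//		TagName: "json", // reuse existing `json:"..."` struct tags
//		Result:  &cfg,
//	})
//	if err != nil {
//		return err
//	}
//	return decoder.Decode(raw)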
-func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. - if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. - var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metadata. - if addMetaKey && d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - elem := val.Elem() - - // If we can't address this element, then it's not writable. Instead, - // we make a copy of the value (which is a pointer and therefore - // writable), decode into that, and replace the whole value. - copied := false - if !elem.CanAddr() { - copied = true - - // Make *T - copy := reflect.New(elem.Type()) - - // *T = elem - copy.Elem().Set(elem) - - // Set elem so we decode into it - elem = copy - } - - // Decode. If we have an error then return. We also return right - // away if we're not a copy because that means we decoded directly.
- if err := d.decode(name, data, elem); err != nil || !copied { - return err - } - - // If we're a copy, we need to set the final result - val.Set(elem.Elem()) - return nil - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. - // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data)
- } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err 
!= nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go index 104c74a890f8..35b00aaf0552 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go +++ 
b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go @@ -93,7 +93,7 @@ var ( ) // Loosely based on the BPF_F_REPLACE support check in -// . +// https://github.com/cilium/ebpf/blob/v0.6.0/link/syscalls.go. // // TODO: move this logic to cilium/ebpf func haveBpfProgReplace() bool { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go index fb4fcc7f75bb..9e2f0ec04c8a 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go @@ -28,6 +28,7 @@ var subsystems = []subsystem{ &FreezerGroup{}, &RdmaGroup{}, &NameGroup{GroupName: "name=systemd", Join: true}, + &NameGroup{GroupName: "misc", Join: true}, } var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist") diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go index 45744c15c0a1..5d561facebcb 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go @@ -177,7 +177,7 @@ func allowAllDevices() []systemdDbus.Property { // generateDeviceProperties takes the configured device rules and generates a // corresponding set of systemd properties to configure the devices correctly. -func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, error) { +func generateDeviceProperties(r *configs.Resources, sdVer int) ([]systemdDbus.Property, error) { if r.SkipDevices { return nil, nil } @@ -238,9 +238,10 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err // trickery to convert things: // // * Concrete rules with non-wildcard major/minor numbers have to use - // /dev/{block,char} paths. This is slightly odd because it means - // that we cannot add whitelist rules for devices that don't exist, - // but there's not too much we can do about that. + // /dev/{block,char}/MAJOR:minor paths. Before v240, systemd uses + // stat(2) on such paths to look up device properties, meaning we + // cannot add whitelist rules for devices that don't exist. Since v240, + // device properties are parsed from the path string. // // However, path globbing is not support for path-based rules so we // need to handle wildcards in some other manner. @@ -288,13 +289,16 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err case devices.CharDevice: entry.Path = fmt.Sprintf("/dev/char/%d:%d", rule.Major, rule.Minor) } - // systemd will issue a warning if the path we give here doesn't exist. - // Since all of this logic is best-effort anyway (we manually set these - // rules separately to systemd) we can safely skip entries that don't - // have a corresponding path. - if _, err := os.Stat(entry.Path); err != nil { - logrus.Debugf("skipping device %s for systemd: %s", entry.Path, err) - continue + if sdVer < 240 { + // Old systemd versions use stat(2) on path to find out device major:minor + // numbers and type. If the path doesn't exist, it will not add the rule, + // emitting a warning instead. 
+ // Since all of this logic is best-effort anyway (we manually set these + // rules separately to systemd) we can safely skip entries that don't + // have a corresponding path. + if _, err := os.Stat(entry.Path); err != nil { + continue + } } } deviceAllowList = append(deviceAllowList, entry) @@ -343,32 +347,52 @@ func isUnitExists(err error) bool { return isDbusError(err, "org.freedesktop.systemd1.UnitExists") } -func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property) error { +func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property, ignoreExist bool) error { statusChan := make(chan string, 1) + retry := true + +retry: err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) error { _, err := c.StartTransientUnitContext(context.TODO(), unitName, "replace", properties, statusChan) return err }) - if err == nil { - timeout := time.NewTimer(30 * time.Second) - defer timeout.Stop() - - select { - case s := <-statusChan: - close(statusChan) - // Please refer to https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit - if s != "done" { - resetFailedUnit(cm, unitName) - return fmt.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s) - } - case <-timeout.C: + if err != nil { + if !isUnitExists(err) { + return err + } + if ignoreExist { + // TODO: remove this hack. + // This is kubelet making sure a slice exists (see + // https://github.com/opencontainers/runc/pull/1124). + return nil + } + if retry { + // In case a unit with the same name exists, this may + // be a leftover failed unit. Reset it, so systemd can + // remove it, and retry once. resetFailedUnit(cm, unitName) - return errors.New("Timeout waiting for systemd to create " + unitName) + retry = false + goto retry } - } else if !isUnitExists(err) { return err } + timeout := time.NewTimer(30 * time.Second) + defer timeout.Stop() + + select { + case s := <-statusChan: + close(statusChan) + // Please refer to https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit + if s != "done" { + resetFailedUnit(cm, unitName) + return fmt.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s) + } + case <-timeout.C: + resetFailedUnit(cm, unitName) + return errors.New("Timeout waiting for systemd to create " + unitName) + } + return nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go index 83d10dd705fd..dd474cf1b168 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go @@ -51,5 +51,10 @@ func RangeToBits(str string) ([]byte, error) { // do not allow empty values return nil, errors.New("empty value") } + + // fit cpuset parsing order in systemd + for l, r := 0, len(ret)-1; l < r; l, r = l+1, r-1 { + ret[l], ret[r] = ret[r], ret[l] + } return ret, nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go index a74a05a5cd05..fe036b3bda5d 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go +++ 
b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go @@ -71,12 +71,13 @@ var legacySubsystems = []subsystem{ &fs.NetClsGroup{}, &fs.NameGroup{GroupName: "name=systemd"}, &fs.RdmaGroup{}, + &fs.NameGroup{GroupName: "misc"}, } func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) { var properties []systemdDbus.Property - deviceProperties, err := generateDeviceProperties(r) + deviceProperties, err := generateDeviceProperties(r, systemdVersion(cm)) if err != nil { return nil, err } @@ -206,7 +207,7 @@ func (m *legacyManager) Apply(pid int) error { properties = append(properties, c.SystemdProps...) - if err := startUnit(m.dbus, unitName, properties); err != nil { + if err := startUnit(m.dbus, unitName, properties, pid == -1); err != nil { return err } @@ -273,14 +274,7 @@ func getSubsystemPath(slice, unit, subsystem string) (string, error) { return "", err } - initPath, err := cgroups.GetInitCgroup(subsystem) - if err != nil { - return "", err - } - // if pid 1 is systemd 226 or later, it will be in init.scope, not the root - initPath = strings.TrimSuffix(filepath.Clean(initPath), "init.scope") - - return filepath.Join(mountpoint, initPath, slice, unit), nil + return filepath.Join(mountpoint, slice, unit), nil } func (m *legacyManager) Freeze(state configs.FreezerState) error { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go index de0cb974d460..919e5632f343 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go @@ -182,7 +182,7 @@ func genV2ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]syst // aren't the end of the world, but it is a bit concerning. However // it's unclear if systemd removes all eBPF programs attached when // doing SetUnitProperties... - deviceProperties, err := generateDeviceProperties(r) + deviceProperties, err := generateDeviceProperties(r, systemdVersion(cm)) if err != nil { return nil, err } @@ -284,7 +284,7 @@ func (m *unifiedManager) Apply(pid int) error { properties = append(properties, c.SystemdProps...) - if err := startUnit(m.dbus, unitName, properties); err != nil { + if err := startUnit(m.dbus, unitName, properties, pid == -1); err != nil { return fmt.Errorf("unable to start unit %q (properties %+v): %w", unitName, properties, err) } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index b32af4ee5302..fc4ae44a4853 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -162,8 +162,10 @@ func readProcsFile(dir string) ([]int, error) { // ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup // or /proc//cgroup, into a map of subsystems to cgroup paths, e.g. -// "cpu": "/user.slice/user-1000.slice" -// "pids": "/user.slice/user-1000.slice" +// +// "cpu": "/user.slice/user-1000.slice" +// "pids": "/user.slice/user-1000.slice" +// // etc. 
// // Note that for cgroup v2 unified hierarchy, there are no per-controller diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go index 627621a58d86..4fbd308dadd6 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go @@ -131,9 +131,8 @@ func (v *ConfigValidator) cgroupnamespace(config *configs.Config) error { // convertSysctlVariableToDotsSeparator can return sysctl variables in dots separator format. // The '/' separator is also accepted in place of a '.'. // Convert the sysctl variables to dots separator format for validation. -// More info: -// https://man7.org/linux/man-pages/man8/sysctl.8.html -// https://man7.org/linux/man-pages/man5/sysctl.d.5.html +// More info: sysctl(8), sysctl.d(5). +// // For example: // Input sysctl variable "net/ipv4/conf/eno2.100.rp_filter" // will return the converted value "net.ipv4.conf.eno2/100.rp_filter" diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index 9df830d8cdbb..dd61dfd3c90c 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -926,7 +926,7 @@ func (c *linuxContainer) criuSupportsExtNS(t configs.NamespaceType) bool { } func criuNsToKey(t configs.NamespaceType) string { - return "extRoot" + strings.Title(configs.NsName(t)) + "NS" + return "extRoot" + strings.Title(configs.NsName(t)) + "NS" //nolint:staticcheck // SA1019: strings.Title is deprecated } func (c *linuxContainer) handleCheckpointingExternalNamespaces(rpcOpts *criurpc.CriuOpts, t configs.NamespaceType) error { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go new file mode 100644 index 000000000000..cc1e2079a795 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go @@ -0,0 +1,17 @@ +//go:build !go1.20 +// +build !go1.20 + +package libcontainer + +import "golang.org/x/sys/unix" + +func eaccess(path string) error { + // This check is similar to access(2) with X_OK except for + // setuid/setgid binaries where it checks against the effective + // (rather than real) uid and gid. It is not needed in go 1.20 + // and beyond and will be removed later. + + // Relies on code added in https://go-review.googlesource.com/c/sys/+/468877 + // and older CLs linked from there. 
+ return unix.Faccessat(unix.AT_FDCWD, path, unix.X_OK, unix.AT_EACCESS) +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go new file mode 100644 index 000000000000..7c049fd7aa02 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package libcontainer + +func eaccess(path string) error { + // Not needed in Go 1.20+ as the functionality is already in there + // (added by https://go.dev/cl/416115, https://go.dev/cl/414824, + // and fixed in Go 1.20.2 by https://go.dev/cl/469956). + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go index e6c71ac34e37..a1fa7de2d249 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -179,6 +179,12 @@ func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, err return nil, fmt.Errorf("unable to get cgroup PIDs: %w", err) } if len(pids) != 0 { + if config.Cgroups.Systemd { + // systemd cgroup driver can't add a pid to an + // existing systemd unit and will return an + // error anyway, so let's error out early. + return nil, fmt.Errorf("container's cgroup is not empty: %d process(es) found", len(pids)) + } // TODO: return an error. logrus.Warnf("container's cgroup is not empty: %d process(es) found", len(pids)) logrus.Warn("DEPRECATED: running container in a non-empty cgroup won't be supported in runc 1.2; https://github.com/opencontainers/runc/issues/3132") @@ -338,10 +344,9 @@ func (l *LinuxFactory) StartInitialization() (err error) { defer func() { if e := recover(); e != nil { - if e, ok := e.(error); ok { - err = fmt.Errorf("panic from initialization: %w, %s", e, debug.Stack()) + if ee, ok := e.(error); ok { + err = fmt.Errorf("panic from initialization: %w, %s", ee, debug.Stack()) } else { - //nolint:errorlint // here e is not of error type err = fmt.Errorf("panic from initialization: %v, %s", e, debug.Stack()) } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go index 1e5c394c3e06..2e4c59353c83 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -411,8 +411,9 @@ func fixStdioPermissions(u *user.ExecUser) error { return &os.PathError{Op: "fstat", Path: file.Name(), Err: err} } - // Skip chown if uid is already the one we want. - if int(s.Uid) == u.Uid { + // Skip chown if uid is already the one we want or any of the STDIO descriptors + // were redirected to /dev/null. 
+ if int(s.Uid) == u.Uid || s.Rdev == null.Rdev { continue } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index ec7638e4d512..c3f88fc7038b 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -329,26 +329,41 @@ func mountCgroupV2(m *configs.Mount, c *mountConfig) error { if err := os.MkdirAll(dest, 0o755); err != nil { return err } - return utils.WithProcfd(c.root, m.Destination, func(procfd string) error { - if err := mount(m.Source, m.Destination, procfd, "cgroup2", uintptr(m.Flags), m.Data); err != nil { - // when we are in UserNS but CgroupNS is not unshared, we cannot mount cgroup2 (#2158) - if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EBUSY) { - src := fs2.UnifiedMountpoint - if c.cgroupns && c.cgroup2Path != "" { - // Emulate cgroupns by bind-mounting - // the container cgroup path rather than - // the whole /sys/fs/cgroup. - src = c.cgroup2Path - } - err = mount(src, m.Destination, procfd, "", uintptr(m.Flags)|unix.MS_BIND, "") - if c.rootlessCgroups && errors.Is(err, unix.ENOENT) { - err = nil - } - } - return err - } - return nil + err = utils.WithProcfd(c.root, m.Destination, func(procfd string) error { + return mount(m.Source, m.Destination, procfd, "cgroup2", uintptr(m.Flags), m.Data) }) + if err == nil || !(errors.Is(err, unix.EPERM) || errors.Is(err, unix.EBUSY)) { + return err + } + + // When we are in UserNS but CgroupNS is not unshared, we cannot mount + // cgroup2 (#2158), so fall back to bind mount. + bindM := &configs.Mount{ + Device: "bind", + Source: fs2.UnifiedMountpoint, + Destination: m.Destination, + Flags: unix.MS_BIND | m.Flags, + PropagationFlags: m.PropagationFlags, + } + if c.cgroupns && c.cgroup2Path != "" { + // Emulate cgroupns by bind-mounting the container cgroup path + // rather than the whole /sys/fs/cgroup. + bindM.Source = c.cgroup2Path + } + // mountToRootfs() handles remounting for MS_RDONLY. + // No need to set c.fd here, because mountToRootfs() calls utils.WithProcfd() by itself in mountPropagate(). + err = mountToRootfs(bindM, c) + if c.rootlessCgroups && errors.Is(err, unix.ENOENT) { + // ENOENT (for `src = c.cgroup2Path`) happens when rootless runc is being executed + // outside the userns+mountns. + // + // Mask `/sys/fs/cgroup` to ensure it is read-only, even when `/sys` is mounted + // with `rbind,ro` (`runc spec --rootless` produces `rbind,ro` for `/sys`). + err = utils.WithProcfd(c.root, m.Destination, func(procfd string) error { + return maskPath(procfd, c.label) + }) + } + return err } func doTmpfsCopyUp(m *configs.Mount, rootfs, mountLabel string) (Err error) { @@ -398,32 +413,43 @@ func doTmpfsCopyUp(m *configs.Mount, rootfs, mountLabel string) (Err error) { func mountToRootfs(m *configs.Mount, c *mountConfig) error { rootfs := c.root - mountLabel := c.label - mountFd := c.fd - dest, err := securejoin.SecureJoin(rootfs, m.Destination) - if err != nil { - return err - } + // procfs and sysfs are special because we need to ensure they are actually + // mounted on a specific path in a container without any funny business. switch m.Device { case "proc", "sysfs": // If the destination already exists and is not a directory, we bail - // out This is to avoid mounting through a symlink or similar -- which + // out. 
This is to avoid mounting through a symlink or similar -- which // has been a "fun" attack scenario in the past. // TODO: This won't be necessary once we switch to libpathrs and we can // stop all of these symlink-exchange attacks. + dest := filepath.Clean(m.Destination) + if !strings.HasPrefix(dest, rootfs) { + // Do not use securejoin as it resolves symlinks. + dest = filepath.Join(rootfs, dest) + } if fi, err := os.Lstat(dest); err != nil { if !os.IsNotExist(err) { return err } - } else if fi.Mode()&os.ModeDir == 0 { + } else if !fi.IsDir() { return fmt.Errorf("filesystem %q must be mounted on ordinary directory", m.Device) } if err := os.MkdirAll(dest, 0o755); err != nil { return err } - // Selinux kernels do not support labeling of /proc or /sys + // Selinux kernels do not support labeling of /proc or /sys. return mountPropagate(m, rootfs, "", nil) + } + + mountLabel := c.label + mountFd := c.fd + dest, err := securejoin.SecureJoin(rootfs, m.Destination) + if err != nil { + return err + } + + switch m.Device { case "mqueue": if err := os.MkdirAll(dest, 0o755); err != nil { return err diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go index 081d1503a3f3..c09a7bed30ea 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go @@ -198,11 +198,12 @@ func (l *linuxStandardInit) Init() error { if err != nil { return err } - // exec.LookPath might return no error for an executable residing on a - // file system mounted with noexec flag, so perform this extra check - // now while we can still return a proper error. - if err := system.Eaccess(name); err != nil { - return &os.PathError{Op: "exec", Path: name, Err: err} + // exec.LookPath in Go < 1.20 might return no error for an executable + // residing on a file system mounted with noexec flag, so perform this + // extra check now while we can still return a proper error. + // TODO: remove this once go < 1.20 is not supported. + if err := eaccess(name); err != nil { + return &os.PathError{Op: "eaccess", Path: name, Err: err} } // Set seccomp as close to execve as possible, so as few syscalls take diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go index c9a23ef3a760..25dc28630710 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go @@ -15,16 +15,16 @@ type syncType string // during container setup. They come in pairs (with procError being a generic // response which is followed by an &initError). 
// -// [ child ] <-> [ parent ] +// [ child ] <-> [ parent ] // -// procHooks --> [run hooks] -// <-- procResume +// procHooks --> [run hooks] +// <-- procResume // -// procReady --> [final setup] -// <-- procRun +// procReady --> [final setup] +// <-- procRun // -// procSeccomp --> [pick up seccomp fd with pidfd_getfd()] -// <-- procSeccompDone +// procSeccomp --> [pick up seccomp fd with pidfd_getfd()] +// <-- procSeccompDone const ( procError syncType = "procError" procReady syncType = "procReady" diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 039059a444cc..e1d6eb18034c 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -31,25 +31,6 @@ func (p ParentDeathSignal) Set() error { return SetParentDeathSignal(uintptr(p)) } -// Eaccess is similar to unix.Access except for setuid/setgid binaries -// it checks against the effective (rather than real) uid and gid. -func Eaccess(path string) error { - err := unix.Faccessat2(unix.AT_FDCWD, path, unix.X_OK, unix.AT_EACCESS) - if err != unix.ENOSYS && err != unix.EPERM { //nolint:errorlint // unix errors are bare - return err - } - - // Faccessat2() not available; check if we are a set[ug]id binary. - if os.Getuid() == os.Geteuid() && os.Getgid() == os.Getegid() { - // For a non-set[ug]id binary, use access(2). - return unix.Access(path, unix.X_OK) - } - - // For a setuid/setgid binary, there is no fallback way - // so assume we can execute the binary. - return nil -} - func Execv(cmd string, args []string, env []string) error { name, err := exec.LookPath(cmd) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 2473c5eaddce..a1e216683d90 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -280,13 +280,13 @@ func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath // found in any entry in passwd and group respectively. // // Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" +// - "" +// - "user" +// - "uid" +// - "user:group" +// - "uid:gid +// - "user:gid" +// - "uid:group" // // It should be noted that if you specify a numeric user or group id, they will // not be evaluated as usernames (only the metadata will be filled). 
So attempting diff --git a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG index a01d9a722d91..905a9b5cdc88 100644 --- a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG +++ b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/CHANGELOG @@ -2,6 +2,31 @@ libseccomp-golang: Releases =============================================================================== https://github.com/seccomp/libseccomp-golang +* Version 0.10.0 - June 9, 2022 +- Minimum supported version of libseccomp bumped to v2.3.1 +- Add seccomp userspace notification API (ActNotify, filter.*Notif*) +- Add filter.{Get,Set}SSB (to support SCMP_FLTATR_CTL_SSB) +- Add filter.{Get,Set}Optimize (to support SCMP_FLTATR_CTL_OPTIMIZE) +- Add filter.{Get,Set}RawRC (to support SCMP_FLTATR_API_SYSRAWRC) +- Add ArchPARISC, ArchPARISC64, ArchRISCV64 +- Add ActKillProcess and ActKillThread; deprecate ActKill +- Add go module support +- Return ErrSyscallDoesNotExist when unable to resolve a syscall +- Fix some functions to check for both kernel level API and libseccomp version +- Fix MakeCondition to use sanitizeCompareOp +- Fix AddRule to handle EACCES (from libseccomp >= 2.5.0) +- Updated the main docs and converted to README.md +- Added CONTRIBUTING.md, SECURITY.md, and administrative docs under doc/admin +- Add GitHub action CI, enable more linters +- test: test against various libseccomp versions +- test: fix and simplify execInSubprocess +- test: fix APILevelIsSupported +- Refactor the Errno(-1 * retCode) pattern +- Refactor/unify libseccomp version / API level checks +- Code cleanups (linter, formatting, spelling fixes) +- Cleanup: use errors.New instead of fmt.Errorf where appropriate +- Cleanup: remove duplicated cgo stuff, redundant linux build tag + * Version 0.9.1 - May 21, 2019 - Minimum supported version of libseccomp bumped to v2.2.0 - Use Libseccomp's `seccomp_version` API to retrieve library version diff --git a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/README.md b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/README.md index 6430f1c9e257..312135ee59e6 100644 --- a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/README.md +++ b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/README.md @@ -22,19 +22,37 @@ The library source repository currently lives on GitHub at the following URLs: * https://github.com/seccomp/libseccomp-golang * https://github.com/seccomp/libseccomp -The project mailing list is currently hosted on Google Groups at the URL below, -please note that a Google account is not required to subscribe to the mailing -list. - -* https://groups.google.com/d/forum/libseccomp - Documentation for this package is also available at: * https://pkg.go.dev/github.com/seccomp/libseccomp-golang +## Verifying Releases + +Starting with libseccomp-golang v0.10.0, the git tag corresponding to each +release should be signed by one of the libseccomp-golang maintainers. 
It is +recommended that before use you verify the release tags using the following +command: + + % git tag -v + +At present, only the following keys, specified via the fingerprints below, are +authorized to sign official libseccomp-golang release tags: + + Paul Moore + 7100 AADF AE6E 6E94 0D2E 0AD6 55E4 5A5A E8CA 7C8A + + Tom Hromatka + 47A6 8FCE 37C7 D702 4FD6 5E11 356C E62C 2B52 4099 + + Kir Kolyshkin + C242 8CD7 5720 FACD CF76 B6EA 17DE 5ECB 75A1 100E + +More information on GnuPG and git tag verification can be found at their +respective websites: https://git-scm.com/docs/git and https://gnupg.org. + ## Installing the package - # go get github.com/seccomp/libseccomp-golang + % go get github.com/seccomp/libseccomp-golang ## Contributing diff --git a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md index c448faa8e809..f645d4efec99 100644 --- a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md +++ b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md @@ -22,6 +22,7 @@ window. * Paul Moore, paul@paul-moore.com * Tom Hromatka, tom.hromatka@oracle.com +* Kir Kolyshkin, kolyshkin@gmail.com ### Resolving Sensitive Security Issues diff --git a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp.go index 8dad12fdbb9b..c23406754c6b 100644 --- a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp.go +++ b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp.go @@ -7,6 +7,7 @@ package seccomp import ( + "errors" "fmt" "os" "runtime" @@ -245,8 +246,8 @@ const ( ) // ErrSyscallDoesNotExist represents an error condition where -// libseccomp is unable to resolve the syscall -var ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name") +// libseccomp is unable to resolve the syscall. +var ErrSyscallDoesNotExist = errors.New("could not resolve syscall name") const ( // Userspace notification response flags @@ -556,7 +557,7 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo } else if len(values) > 2 { return condStruct, fmt.Errorf("conditions can have at most 2 arguments (%d given)", len(values)) } else if len(values) == 0 { - return condStruct, fmt.Errorf("must provide at least one value to compare against") + return condStruct, errors.New("must provide at least one value to compare against") } condStruct.Argument = arg @@ -611,7 +612,7 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) { fPtr := C.seccomp_init(defaultAction.toNative()) if fPtr == nil { - return nil, fmt.Errorf("could not create filter") + return nil, errors.New("could not create filter") } filter := new(ScmpFilter) @@ -623,7 +624,7 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) { // If the kernel does not support TSYNC, allow us to continue without error. 
if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP { filter.Release() - return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err) + return nil, fmt.Errorf("could not create filter: error setting tsync bit: %w", err) } return filter, nil @@ -695,14 +696,14 @@ func (f *ScmpFilter) Merge(src *ScmpFilter) error { defer src.lock.Unlock() if !src.valid || !f.valid { - return fmt.Errorf("one or more of the filter contexts is invalid or uninitialized") + return errors.New("one or more of the filter contexts is invalid or uninitialized") } // Merge the filters if retCode := C.seccomp_merge(f.filterCtx, src.filterCtx); retCode != 0 { e := errRc(retCode) if e == syscall.EINVAL { - return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter") + return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter: %w", e) } return e } diff --git a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go index df4dfb7eba87..0a7fd34f510d 100644 --- a/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go +++ b/cluster-autoscaler/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go @@ -340,7 +340,7 @@ func ensureSupportedVersion() error { func getAPI() (uint, error) { api := C.seccomp_api_get() if api == 0 { - return 0, fmt.Errorf("API level operations are not supported") + return 0, errors.New("API level operations are not supported") } return uint(api), nil @@ -349,11 +349,12 @@ func getAPI() (uint, error) { // Set the API level func setAPI(api uint) error { if retCode := C.seccomp_api_set(C.uint(api)); retCode != 0 { - if errRc(retCode) == syscall.EOPNOTSUPP { - return fmt.Errorf("API level operations are not supported") + e := errRc(retCode) + if e == syscall.EOPNOTSUPP { + return errors.New("API level operations are not supported") } - return fmt.Errorf("could not set API level: %v", retCode) + return fmt.Errorf("could not set API level: %w", e) } return nil @@ -411,7 +412,7 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error // Wrapper for seccomp_rule_add_... 
functions func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, length C.uint, cond C.scmp_cast_t) error { if length != 0 && cond == nil { - return fmt.Errorf("null conditions list, but length is nonzero") + return errors.New("null conditions list, but length is nonzero") } var retCode C.int @@ -430,7 +431,7 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b case syscall.EPERM, syscall.EACCES: return errDefAction case syscall.EINVAL: - return fmt.Errorf("two checks on same syscall argument") + return errors.New("two checks on same syscall argument") default: return e } @@ -455,7 +456,7 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b } else { argsArr := C.make_arg_cmp_array(C.uint(len(conds))) if argsArr == nil { - return fmt.Errorf("error allocating memory for conditions") + return errors.New("error allocating memory for conditions") } defer C.free(argsArr) @@ -495,7 +496,7 @@ func sanitizeAction(in ScmpAction) error { } if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 { - return fmt.Errorf("highest 16 bits must be zeroed except for Trace and Errno") + return errors.New("highest 16 bits must be zeroed except for Trace and Errno") } return nil diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/.golangci.yml b/cluster-autoscaler/vendor/github.com/vishvananda/netns/.golangci.yml new file mode 100644 index 000000000000..600bef78e2b7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md b/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md index 91057c10a5ee..bdfedbe81fa5 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/README.md @@ -49,14 +49,3 @@ func main() { } ``` - -## NOTE - -The library can be safely used only with Go >= 1.10 due to [golang/go#20676](https://github.com/golang/go/issues/20676). - -After locking a goroutine to its current OS thread with `runtime.LockOSThread()` -and changing its network namespace, any new subsequent goroutine won't be -scheduled on that thread while it's locked. Therefore, the new goroutine -will run in a different namespace leading to unexpected results. - -See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details. 
diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go index ae0558d2903d..2ed7c7e2fa40 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/netns_linux.go @@ -2,7 +2,6 @@ package netns import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -136,7 +135,7 @@ func GetFromDocker(id string) (NsHandle, error) { // borrowed from docker/utils/utils.go func findCgroupMountpoint(cgroupType string) (int, string, error) { - output, err := ioutil.ReadFile("/proc/mounts") + output, err := os.ReadFile("/proc/mounts") if err != nil { return -1, "", err } @@ -166,7 +165,7 @@ func findCgroupMountpoint(cgroupType string) (int, string, error) { // borrowed from docker/utils/utils.go // modified to get the docker pid instead of using /proc/self func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { - dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") + dockerpid, err := os.ReadFile("/var/run/docker.pid") if err != nil { return "", err } @@ -178,7 +177,7 @@ func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { if err != nil { return "", err } - output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) if err != nil { return "", err } @@ -265,7 +264,7 @@ func getPidForContainer(id string) (int, error) { return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) } - output, err := ioutil.ReadFile(filename) + output, err := os.ReadFile(filename) if err != nil { return pid, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go index 0e1117106859..1baffb66acbf 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -30,7 +30,7 @@ func (ns NsHandle) Equal(other NsHandle) bool { // String shows the file descriptor number and its dev and inode. func (ns NsHandle) String() string { if ns == -1 { - return "NS(None)" + return "NS(none)" } var s unix.Stat_t if err := unix.Fstat(int(ns), &s); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go index 7a87acbe201a..af727bc091c5 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -17,7 +17,7 @@ func (ns NsHandle) Equal(_ NsHandle) bool { // It is only implemented on Linux, and returns "NS(none)" on other // platforms. 
func (ns NsHandle) String() string { - return "NS(None)" + return "NS(none)" } // UniqueId returns a string which uniquely identifies the namespace diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go index f3b389421ef5..d62f6474d95c 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.7" + Version = "3.5.9" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index d7fd0d90dbd1..34f35b9f28fb 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -16,6 +16,7 @@ package logutil import ( "sort" + "time" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -46,15 +47,20 @@ var DefaultZapLoggerConfig = zap.Config{ // copied from "zap.NewProductionEncoderConfig" with some updates EncoderConfig: zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + + // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps + EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700")) + }, + EncodeDuration: zapcore.StringDurationEncoder, EncodeCaller: zapcore.ShortCallerEncoder, }, diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go new file mode 100644 index 000000000000..ffcecd8c670f --- /dev/null +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go @@ -0,0 +1,47 @@ +// Copyright 2023 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "fmt" +) + +type TLSVersion string + +// Constants for TLS versions. +const ( + TLSVersionDefault TLSVersion = "" + TLSVersion12 TLSVersion = "TLS1.2" + TLSVersion13 TLSVersion = "TLS1.3" +) + +// GetTLSVersion returns the corresponding tls.Version or error. 
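+//
+// An editorial usage note (hedged; not upstream text): callers map a
+// configuration string to a crypto/tls constant, for example:
+//
+//	v, err := GetTLSVersion("TLS1.2") // v == tls.VersionTLS12, err == nil
+//	v, err = GetTLSVersion("")        // v == 0: let crypto/tls decide
+//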
+func GetTLSVersion(version string) (uint16, error) { + var v uint16 + + switch version { + case string(TLSVersionDefault): + v = 0 // 0 means let Go decide. + case string(TLSVersion12): + v = tls.VersionTLS12 + case string(TLSVersion13): + v = tls.VersionTLS13 + default: + return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version) + } + + return v, nil +} diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index c3bc56a65b59..150545d08df2 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -165,6 +165,14 @@ type TLSInfo struct { // Note that cipher suites are prioritized in the given order. CipherSuites []uint16 + // MinVersion is the minimum TLS version that is acceptable. + // If not set, the minimum version is TLS 1.2. + MinVersion uint16 + + // MaxVersion is the maximum TLS version that is acceptable. + // If not set, the default used by Go is selected (see tls.Config.MaxVersion). + MaxVersion uint16 + selfCert bool // parseFunc exists to simplify testing. Typically, parseFunc @@ -339,8 +347,8 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali // Previously, // 1. Server has non-empty (*tls.Config).Certificates on client hello // 2. Server calls (*tls.Config).GetCertificate iff: -// - Server's (*tls.Config).Certificates is not empty, or -// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // // When (*tls.Config).Certificates is always populated on initial handshake, // client is expected to provide a valid matching SNI to pass the TLS @@ -378,8 +386,17 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { } } + var minVersion uint16 + if info.MinVersion != 0 { + minVersion = info.MinVersion + } else { + // Default minimum version is TLS 1.2, previous versions are insecure and deprecated. + minVersion = tls.VersionTLS12 + } + cfg := &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: minVersion, + MaxVersion: info.MaxVersion, ServerName: info.ServerName, } @@ -510,11 +527,6 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) { // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server cfg.NextProtos = []string{"h2"} - // go1.13 enables TLS 1.3 by default - // and in TLS 1.3, cipher suites are not configurable - // setting Max TLS version to TLS 1.2 for go 1.13 - cfg.MaxVersion = tls.VersionTLS12 - return cfg, nil } @@ -569,11 +581,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { } } - // go1.13 enables TLS 1.3 by default - // and in TLS 1.3, cipher suites are not configurable - // setting Max TLS version to TLS 1.2 for go 1.13 - cfg.MaxVersion = tls.VersionTLS12 - return cfg, nil } diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/doc.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/doc.go index 645d744a5a7f..fd61aff117aa 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/doc.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/doc.go @@ -61,7 +61,8 @@ // // 1. context error: canceled or deadline exceeded. // 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded. 
-// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go +// +// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go // // Here is the example code to handle client errors: // @@ -102,5 +103,4 @@ // The grpc load balancer is registered statically and is shared across etcd clients. // To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment // variable. E.g. "ETCD_CLIENT_DEBUG=1". -// package clientv3 diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go index 1d3f1a7a2c7f..f6674235cd9e 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go @@ -45,8 +45,8 @@ func extractHostFromPath(pathStr string) string { return extractHostFromHostPort(path.Base(pathStr)) } -//mustSplit2 returns the values from strings.SplitN(s, sep, 2). -//If sep is not found, it returns ("", "", false) instead. +// mustSplit2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. func mustSplit2(s, sep string) (string, string) { spl := strings.SplitN(s, sep, 2) if len(spl) < 2 { @@ -81,11 +81,12 @@ func schemeToCredsRequirement(schema string) CredsRequirement { // The main differences: // - etcd supports unixs & https names as opposed to unix & http to // distinguish need to configure certificates. -// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. -// - etcd supports unix(s)://local-file naming schema +// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. +// - etcd supports unix(s)://local-file naming schema // (as opposed to unix:local-file canonical name used by grpc for current dir files). -// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) -// is considered serverName - to allow local testing of cert-protected communication. +// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) +// is considered serverName - to allow local testing of cert-protected communication. +// // See more: // - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47 // - https://golang.org/pkg/net/#Dial diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/txn.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/txn.go index 22301fba6b14..3f6a953cf094 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/txn.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/txn.go @@ -25,15 +25,14 @@ import ( // Txn is the interface that wraps mini-transactions. // -// Txn(context.TODO()).If( -// Compare(Value(k1), ">", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -// +// Txn(context.TODO()).If( +// Compare(Value(k1), ">", v1), +// Compare(Version(k1), "=", 2) +// ).Then( +// OpPut(k2,v2), OpPut(k3,v3) +// ).Else( +// OpPut(k4,v4), OpPut(k5,v5) +// ).Commit() type Txn interface { // If takes a list of comparison. If all comparisons passed in succeed, // the operations passed into Then() will be executed. 
Or the operations diff --git a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go index bc886936c869..41a6ec976333 100644 --- a/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/cluster-autoscaler/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -848,7 +848,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{ } } else { // current progress of watch; <= store revision - nextRev = wr.Header.Revision + nextRev = wr.Header.Revision + 1 } if len(wr.Events) > 0 { diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/LICENSE b/cluster-autoscaler/vendor/golang.org/x/mod/LICENSE similarity index 55% rename from cluster-autoscaler/vendor/github.com/ghodss/yaml/LICENSE rename to cluster-autoscaler/vendor/golang.org/x/mod/LICENSE index 7805d36de730..6a66aea5eafe 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/LICENSE +++ b/cluster-autoscaler/vendor/golang.org/x/mod/LICENSE @@ -1,27 +1,4 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/cluster-autoscaler/vendor/golang.org/x/mod/PATENTS b/cluster-autoscaler/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/cluster-autoscaler/vendor/golang.org/x/mod/semver/semver.go b/cluster-autoscaler/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 000000000000..a30a22bf20f1 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,401 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import "sort" + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." 
+ pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use Compare instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements sort.Interface for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using ByVersion. +func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. 
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs.go b/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 000000000000..3bf40fdfecd5 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. +// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. +type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil && !isGo119ErrDot(err) { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. +func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, name, arg...) + fixCmd(name, cmd) + return cmd + +} + +// Command returns the Cmd struct to execute the named program with the given arguments. +// See exec.Command for most details. +// +// Command differs from exec.Command in its handling of PATH lookups, +// which are used when the program name contains no slashes. +// If exec.Command would have returned an exec.Cmd configured to run an +// executable from the current directory, Command instead +// returns an exec.Cmd that will return an error from Start or Run. 
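+//
+// An editorial usage sketch (hedged; not upstream text): execabs is a
+// drop-in for os/exec, so call sites only need to swap the import:
+//
+//	cmd := execabs.Command("git", "status") // PATH lookup as usual,
+//	out, err := cmd.Output()                // but err if "git" resolved to ./git
+//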
+func Command(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) + fixCmd(name, cmd) + return cmd +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs_go118.go b/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 000000000000..2000064a8124 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package execabs + +import "os/exec" + +func isGo119ErrDot(err error) bool { + return false +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs_go119.go b/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 000000000000..f364b3418926 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh index 2045d3dadb87..be0423e6856b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -204,6 +204,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -518,7 +519,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux.go index 398c37e52d6b..de936b677b6a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2967,6 +2967,7 @@ const ( SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a + SOL_UDP = 0x11 SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -3251,6 +3252,19 @@ const ( TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 UDF_SUPER_MAGIC = 0x15013346 + UDP_CORK = 0x1 + UDP_ENCAP = 0x64 + UDP_ENCAP_ESPINUDP = 0x2 + UDP_ENCAP_ESPINUDP_NON_IKE = 0x1 + UDP_ENCAP_GTP0 = 0x4 + UDP_ENCAP_GTP1U = 0x5 + UDP_ENCAP_L2TPINUDP = 0x3 + UDP_GRO = 0x68 + UDP_NO_CHECK6_RX = 0x66 + UDP_NO_CHECK6_TX = 0x65 + UDP_SEGMENT = 0x67 + UDP_V4_FLOW = 0x2 + UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go 
b/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go index 92ac05ff4ea6..b8ad19250689 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,14 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) + blockp := unsafe.Pointer(block) for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) + entry := UTF16PtrToString((*uint16)(blockp)) if len(entry) == 0 { break } env = append(env, entry) - blockp += 2 * (uintptr(len(entry)) + 1) + blockp = unsafe.Add(blockp, 2*(len(entry)+1)) } return env, nil } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/exec_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/exec_windows.go index 75980fd44ad7..a52e0331d8bc 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/exec_windows.go @@ -95,12 +95,17 @@ func ComposeCommandLine(args []string) string { // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. +// DecomposeCommandLine returns error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } var argc int32 - argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go index f8deca8397ae..c964b6848d4f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/service.go @@ -141,6 +141,12 @@ const ( SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + type SERVICE_STATUS struct { ServiceType uint32 CurrentState uint32 @@ -245,3 +251,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? //sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW //sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? 
+//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go index 806baa055f6e..2b4a7bc6c251 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/svc/service.go @@ -68,6 +68,15 @@ const ( AcceptPreShutdown = Accepted(windows.SERVICE_ACCEPT_PRESHUTDOWN) ) +// ActivityStatus allows for services to be selected based on active and inactive categories of service state. +type ActivityStatus uint32 + +const ( + Active = ActivityStatus(windows.SERVICE_ACTIVE) + Inactive = ActivityStatus(windows.SERVICE_INACTIVE) + AnyActivity = ActivityStatus(windows.SERVICE_STATE_ALL) +) + // Status combines State and Accepted commands to fully describe running service. type Status struct { State State diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go index 0dbb20841178..88e62a63851b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go @@ -2220,15 +2220,19 @@ type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { } const ( - // JobObjectInformationClass + // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 JobObjectBasicUIRestrictions = 4 JobObjectCpuRateControlInformation = 15 JobObjectEndOfJobTimeInformation = 6 JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 + JobObjectLimitViolationInformation = 13 JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6d2a268534d7..a81ea2c70019 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -86,6 +86,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") @@ -734,6 +735,14 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes return } +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = 
errnoErr(e1) + } + return +} + func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/LICENSE b/cluster-autoscaler/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/PATENTS b/cluster-autoscaler/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/cluster-autoscaler/vendor/golang.org/x/tools/cmd/stringer/stringer.go new file mode 100644 index 000000000000..998d1a51bfd0 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -0,0 +1,657 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer +// interface. Given the name of a (signed or unsigned) integer type T that has constants +// defined, stringer will create a new self-contained Go source file implementing +// +// func (t T) String() string +// +// The file is created in the same package and directory as the package that defines T. +// It has helpful defaults designed for use with go generate. +// +// Stringer works best with constants that are consecutive values such as created using iota, +// but creates good code regardless. In the future it might also provide custom support for +// constant sets that are bit patterns. +// +// For example, given this snippet, +// +// package painkiller +// +// type Pill int +// +// const ( +// Placebo Pill = iota +// Aspirin +// Ibuprofen +// Paracetamol +// Acetaminophen = Paracetamol +// ) +// +// running this command +// +// stringer -type=Pill +// +// in the same directory will create the file pill_string.go, in package painkiller, +// containing a definition of +// +// func (Pill) String() string +// +// That method will translate the value of a Pill constant to the string representation +// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will +// print the string "Aspirin". +// +// Typically this process would be run using go generate, like this: +// +// //go:generate stringer -type=Pill +// +// If multiple constants have the same value, the lexically first matching name will +// be used (in the example, Acetaminophen will print as "Paracetamol"). +// +// With no arguments, it processes the package in the current directory. +// Otherwise, the arguments must name a single directory holding a Go package +// or a set of Go source files that represent a single Go package. +// +// The -type flag accepts a comma-separated list of types so a single run can +// generate methods for multiple types. The default output file is t_string.go, +// where t is the lower-cased name of the first type listed. It can be overridden +// with the -output flag. +// +// The -linecomment flag tells stringer to generate the text of any line comment, trimmed +// of leading spaces, instead of the constant name. For instance, if the constants above had a +// Pill prefix, one could write +// +// PillAspirin // Aspirin +// +// to suppress it in the output. 
+package main // import "golang.org/x/tools/cmd/stringer" + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/packages" +) + +var ( + typeNames = flag.String("type", "", "comma-separated list of type names; must be set") + output = flag.String("output", "", "output file name; default srcdir/_string.go") + trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") + linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") + buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") +) + +// Usage is a replacement usage function for the flags package. +func Usage() { + fmt.Fprintf(os.Stderr, "Usage of stringer:\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n") + fmt.Fprintf(os.Stderr, "For more information, see:\n") + fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + log.SetFlags(0) + log.SetPrefix("stringer: ") + flag.Usage = Usage + flag.Parse() + if len(*typeNames) == 0 { + flag.Usage() + os.Exit(2) + } + types := strings.Split(*typeNames, ",") + var tags []string + if len(*buildTags) > 0 { + tags = strings.Split(*buildTags, ",") + } + + // We accept either one directory or a list of files. Which do we have? + args := flag.Args() + if len(args) == 0 { + // Default: process whole package in current directory. + args = []string{"."} + } + + // Parse the package once. + var dir string + g := Generator{ + trimPrefix: *trimprefix, + lineComment: *linecomment, + } + // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). + if len(args) == 1 && isDirectory(args[0]) { + dir = args[0] + } else { + if len(tags) != 0 { + log.Fatal("-tags option applies only to directories, not when files are specified") + } + dir = filepath.Dir(args[0]) + } + + g.parsePackage(args, tags) + + // Print the header and package clause. + g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"strconv\"\n") // Used by all methods. + + // Run generate for each type. + for _, typeName := range types { + g.generate(typeName) + } + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + baseName := fmt.Sprintf("%s_string.go", types[0]) + outputName = filepath.Join(dir, strings.ToLower(baseName)) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// isDirectory reports whether the named file is a directory. +func isDirectory(name string) bool { + info, err := os.Stat(name) + if err != nil { + log.Fatal(err) + } + return info.IsDir() +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + buf bytes.Buffer // Accumulated output. + pkg *Package // Package we are scanning. + + trimPrefix string + lineComment bool +} + +func (g *Generator) Printf(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, format, args...) +} + +// File holds a single parsed file and associated data. 
+type File struct { + pkg *Package // Package to which this file belongs. + file *ast.File // Parsed AST. + // These fields are reset for each type being generated. + typeName string // Name of the constant type. + values []Value // Accumulator for constant values of that type. + + trimPrefix string + lineComment bool +} + +type Package struct { + name string + defs map[*ast.Ident]types.Object + files []*File +} + +// parsePackage analyzes the single package constructed from the patterns and tags. +// parsePackage exits if there is an error. +func (g *Generator) parsePackage(patterns []string, tags []string) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + // TODO: Need to think about constants in test files. Maybe write type_string_test.go + // in a separate pass? For later. + Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + log.Fatal(err) + } + if len(pkgs) != 1 { + log.Fatalf("error: %d packages found", len(pkgs)) + } + g.addPackage(pkgs[0]) +} + +// addPackage adds a type checked Package and its syntax files to the generator. +func (g *Generator) addPackage(pkg *packages.Package) { + g.pkg = &Package{ + name: pkg.Name, + defs: pkg.TypesInfo.Defs, + files: make([]*File, len(pkg.Syntax)), + } + + for i, file := range pkg.Syntax { + g.pkg.files[i] = &File{ + file: file, + pkg: g.pkg, + trimPrefix: g.trimPrefix, + lineComment: g.lineComment, + } + } +} + +// generate produces the String method for the named type. +func (g *Generator) generate(typeName string) { + values := make([]Value, 0, 100) + for _, file := range g.pkg.files { + // Set the state for this run of the walker. + file.typeName = typeName + file.values = nil + if file.file != nil { + ast.Inspect(file.file, file.genDecl) + values = append(values, file.values...) + } + } + + if len(values) == 0 { + log.Fatalf("no values defined for type %s", typeName) + } + // Generate code that will fail if the constants change value. + g.Printf("func _() {\n") + g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") + g.Printf("\t// Re-run the stringer command to generate them again.\n") + g.Printf("\tvar x [1]struct{}\n") + for _, v := range values { + g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) + } + g.Printf("}\n") + runs := splitIntoRuns(values) + // The decision of which pattern to use depends on the number of + // runs in the numbers. If there's only one, it's easy. For more than + // one, there's a tradeoff between complexity and size of the data + // and code vs. the simplicity of a map. A map takes more space, + // but so does the code. The decision here (crossover at 10) is + // arbitrary, but considers that for large numbers of runs the cost + // of the linear scan in the switch might become important, and + // rather than use yet another algorithm such as binary search, + // we punt and use a map. In any case, the likelihood of a map + // being necessary for any realistic example other than bitmasks + // is very low. And bitmasks probably deserve their own analysis, + // to be done some other day. + switch { + case len(runs) == 1: + g.buildOneRun(runs, typeName) + case len(runs) <= 10: + g.buildMultipleRuns(runs, typeName) + default: + g.buildMap(runs, typeName) + } +} + +// splitIntoRuns breaks the values into runs of contiguous sequences. 
+// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. +// The input slice is known to be non-empty. +func splitIntoRuns(values []Value) [][]Value { + // We use stable sort so the lexically first name is chosen for equal elements. + sort.Stable(byValue(values)) + // Remove duplicates. Stable sort has put the one we want to print first, + // so use that one. The String method won't care about which named constant + // was the argument, so the first name for the given value is the only one to keep. + // We need to do this because identical values would cause the switch or map + // to fail to compile. + j := 1 + for i := 1; i < len(values); i++ { + if values[i].value != values[i-1].value { + values[j] = values[i] + j++ + } + } + values = values[:j] + runs := make([][]Value, 0, 10) + for len(values) > 0 { + // One contiguous sequence per outer loop. + i := 1 + for i < len(values) && values[i].value == values[i-1].value+1 { + i++ + } + runs = append(runs, values[:i]) + values = values[i:] + } + return runs +} + +// format returns the gofmt-ed contents of the Generator's buffer. +func (g *Generator) format() []byte { + src, err := format.Source(g.buf.Bytes()) + if err != nil { + // Should never happen, but can arise when developing this code. + // The user can compile the output to see the error. + log.Printf("warning: internal error: invalid Go generated: %s", err) + log.Printf("warning: compile the package to analyze the error") + return g.buf.Bytes() + } + return src +} + +// Value represents a declared constant. +type Value struct { + originalName string // The name of the constant. + name string // The name with trimmed prefix. + // The value is stored as a bit pattern alone. The boolean tells us + // whether to interpret it as an int64 or a uint64; the only place + // this matters is when sorting. + // Much of the time the str field is all we need; it is printed + // by Value.String. + value uint64 // Will be converted to int64 when needed. + signed bool // Whether the constant is a signed type. + str string // The string representation given by the "go/constant" package. +} + +func (v *Value) String() string { + return v.str +} + +// byValue lets us sort the constants into increasing order. +// We take care in the Less method to sort in signed or unsigned order, +// as appropriate. +type byValue []Value + +func (b byValue) Len() int { return len(b) } +func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byValue) Less(i, j int) bool { + if b[i].signed { + return int64(b[i].value) < int64(b[j].value) + } + return b[i].value < b[j].value +} + +// genDecl processes one declaration clause. +func (f *File) genDecl(node ast.Node) bool { + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Tok != token.CONST { + // We only care about const declarations. + return true + } + // The name of the type of the constants we are declaring. + // Can change if this is a multi-element declaration. + typ := "" + // Loop over the elements of the declaration. Each element is a ValueSpec: + // a list of names possibly followed by a type, possibly followed by values. + // If the type and value are both missing, we carry down the type (and value, + // but the "go/types" package takes care of that). + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. + if vspec.Type == nil && len(vspec.Values) > 0 { + // "X = 1". With no type but a value. If the constant is untyped, + // skip this vspec and reset the remembered type. 
+ typ = "" + + // If this is a simple type conversion, remember the type. + // We don't mind if this is actually a call; a qualified call won't + // be matched (that will be SelectorExpr, not Ident), and only unusual + // situations will result in a function call that appears to be + // a type conversion. + ce, ok := vspec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + id, ok := ce.Fun.(*ast.Ident) + if !ok { + continue + } + typ = id.Name + } + if vspec.Type != nil { + // "X T". We have a type. Remember it. + ident, ok := vspec.Type.(*ast.Ident) + if !ok { + continue + } + typ = ident.Name + } + if typ != f.typeName { + // This is not the type we're looking for. + continue + } + // We now have a list of names (from one line of source code) all being + // declared with the desired type. + // Grab their names and actual values and store them in f.values. + for _, name := range vspec.Names { + if name.Name == "_" { + continue + } + // This dance lets the type checker find the values for us. It's a + // bit tricky: look up the object declared by the name, find its + // types.Const, and extract its value. + obj, ok := f.pkg.defs[name] + if !ok { + log.Fatalf("no value for constant %s", name) + } + info := obj.Type().Underlying().(*types.Basic).Info() + if info&types.IsInteger == 0 { + log.Fatalf("can't handle non-integer constant type %s", typ) + } + value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. + if value.Kind() != constant.Int { + log.Fatalf("can't happen: constant is not an integer %s", name) + } + i64, isInt := constant.Int64Val(value) + u64, isUint := constant.Uint64Val(value) + if !isInt && !isUint { + log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) + } + if !isInt { + u64 = uint64(i64) + } + v := Value{ + originalName: name.Name, + value: u64, + signed: info&types.IsUnsigned == 0, + str: value.String(), + } + if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { + v.name = strings.TrimSpace(c.Text()) + } else { + v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) + } + f.values = append(f.values, v) + } + } + return false +} + +// Helpers + +// usize returns the number of bits of the smallest unsigned integer +// type that will hold n. Used to create the smallest possible slice of +// integers to use as indexes into the concatenated strings. +func usize(n int) int { + switch { + case n < 1<<8: + return 8 + case n < 1<<16: + return 16 + default: + // 2^32 is enough constants for anyone. + return 32 + } +} + +// declareIndexAndNameVars declares the index slices and concatenated names +// strings representing the runs of values. 
+func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { + var indexes, names []string + for i, run := range runs { + index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) + if len(run) != 1 { + indexes = append(indexes, index) + } + names = append(names, name) + } + g.Printf("const (\n") + for _, name := range names { + g.Printf("\t%s\n", name) + } + g.Printf(")\n\n") + + if len(indexes) > 0 { + g.Printf("var (") + for _, index := range indexes { + g.Printf("\t%s\n", index) + } + g.Printf(")\n\n") + } +} + +// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars +func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { + index, name := g.createIndexAndNameDecl(run, typeName, "") + g.Printf("const %s\n", name) + g.Printf("var %s\n", index) +} + +// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". +func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { + b := new(bytes.Buffer) + indexes := make([]int, len(run)) + for i := range run { + b.WriteString(run[i].name) + indexes[i] = b.Len() + } + nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) + nameLen := b.Len() + b.Reset() + fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) + for i, v := range indexes { + if i > 0 { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "%d", v) + } + fmt.Fprintf(b, "}") + return b.String(), nameConst +} + +// declareNameVars declares the concatenated names string representing all the values in the runs. +func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { + g.Printf("const _%s_name%s = \"", typeName, suffix) + for _, run := range runs { + for i := range run { + g.Printf("%s", run[i].name) + } + } + g.Printf("\"\n") +} + +// buildOneRun generates the variables and String method for a single run of contiguous values. +func (g *Generator) buildOneRun(runs [][]Value, typeName string) { + values := runs[0] + g.Printf("\n") + g.declareIndexAndNameVar(values, typeName) + // The generated code is simple enough to write as a Printf format. + lessThanZero := "" + if values[0].signed { + lessThanZero = "i < 0 || " + } + if values[0].value == 0 { // Signed or unsigned, 0 is still 0. + g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) + } else { + g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) + } +} + +// Arguments to format are: +// +// [1]: type name +// [2]: size of index element (8 for uint8 etc.) +// [3]: less than zero check (for signed types) +const stringOneRun = `func (i %[1]s) String() string { + if %[3]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] +} +` + +// Arguments to format are: +// [1]: type name +// [2]: lowest defined value for type, as a string +// [3]: size of index element (8 for uint8 etc.) +// [4]: less than zero check (for signed types) +/* + */ +const stringOneRunWithOffset = `func (i %[1]s) String() string { + i -= %[2]s + if %[4]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] +} +` + +// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. 
+// For this pattern, a single Printf format won't do. +func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareIndexAndNameVars(runs, typeName) + g.Printf("func (i %s) String() string {\n", typeName) + g.Printf("\tswitch {\n") + for i, values := range runs { + if len(values) == 1 { + g.Printf("\tcase i == %s:\n", &values[0]) + g.Printf("\t\treturn _%s_name_%d\n", typeName, i) + continue + } + if values[0].value == 0 && !values[0].signed { + // For an unsigned lower bound of 0, "0 <= i" would be redundant. + g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) + } else { + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + } + if values[0].value != 0 { + g.Printf("\t\ti -= %s\n", &values[0]) + } + g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", + typeName, i, typeName, i, typeName, i) + } + g.Printf("\tdefault:\n") + g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) + g.Printf("\t}\n") + g.Printf("}\n") +} + +// buildMap handles the case where the space is so sparse a map is a reasonable fallback. +// It's a rare situation but has simple code. +func (g *Generator) buildMap(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareNameVars(runs, typeName, "") + g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) + n := 0 + for _, values := range runs { + for _, value := range values { + g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) + n += len(value.name) + } + } + g.Printf("}\n\n") + g.Printf(stringMap, typeName) +} + +// Argument to format is the type name. +const stringMap = `func (i %[1]s) String() string { + if str, ok := _%[1]s_map[i]; ok { + return str + } + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" +} +` diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 000000000000..165ede0f8f38 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,187 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) 
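+//
+// A minimal sketch of typical use, tying together Find, NewReader, and Read
+// as declared below (error handling elided for brevity; illustrative only):
+//
+//  filename, path := gcexportdata.Find("fmt", "")
+//  f, _ := os.Open(filename)
+//  defer f.Close()
+//  r, _ := gcexportdata.NewReader(f)
+//  fset := token.NewFileSet()
+//  imports := make(map[string]*types.Package)
+//  pkg, _ := gcexportdata.Read(r, fset, imports, path)
+//  _ = pkg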
+package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. + return buf, nil + } +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. 
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err + + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 000000000000..37a7247e2686 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. 
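+//
+// For example, a sketch of use with go/types (the files variable is a
+// hypothetical []*ast.File of parsed sources; error handling elided):
+//
+//  fset := token.NewFileSet()
+//  imports := make(map[string]*types.Package)
+//  conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
+//  pkg, _ := conf.Check("example.org/hello", fset, files, nil)
+//  _ = pkg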
+// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 000000000000..18a002f82a1f --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "context" + "fmt" + "go/types" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +var debug = false + +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? 
+ inv.Verb = "env"
+ inv.Args = []string{"GOARCH"}
+ envout, enverr := gocmdRunner.Run(ctx, inv)
+ if enverr != nil {
+ return nil, enverr
+ }
+ goarch = strings.TrimSpace(envout.String())
+ compiler = "gc"
+ } else {
+ return nil, friendlyErr
+ }
+ } else {
+ fields := strings.Fields(stdout.String())
+ if len(fields) < 2 {
+ return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
+ stdout.String(), stderr.String())
+ }
+ goarch = fields[0]
+ compiler = fields[1]
+ }
+ return types.SizesFor(compiler, goarch), nil
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/doc.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 000000000000..da4ab89fe63f
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,220 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The Load function takes as input a list of patterns and returns a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool,
+but all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypeInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load contains only the packages matched
+by the patterns. Their dependencies can be found by walking the import
+graph using the Imports fields.
+
+The Load function can be configured by passing a pointer to a Config as
+the first argument.
A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. 
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+ several times in sequence as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to Load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: The Overlay field in the Config allows providing alternate contents
+for Go source files, by providing a mapping from file path to contents.
+go/packages will pull in new imports added in overlay files when go/packages
+is run in LoadImports mode or greater.
+Overlay support for the go list driver isn't complete yet: if the file doesn't
+exist on disk, it will only be recognized in an overlay if it is a non-test file
+and the package would be reported even without the overlay.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/external.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 000000000000..7242a0a7d2be
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ exec "golang.org/x/sys/execabs"
+ "os"
+ "strings"
+)
+
+// The Driver Protocol
+//
+// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
+// This allows for different build systems to support go/packages by telling go/packages how the
+// packages' source is organized.
+// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
+// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
+// documentation in doc.go for the full description of the patterns that need to be supported.
+// A driver receives a JSON-serialized driverRequest struct on standard input and must
+// produce a JSON-serialized driverResponse (see definition in packages.go) on its standard output.
+
+// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
+type driverRequest struct {
+ Mode LoadMode `json:"mode"`
+ // Env specifies the environment the underlying build system should be run in.
+ Env []string `json:"env"`
+ // BuildFlags are flags that should be passed to the underlying build system.
+ BuildFlags []string `json:"build_flags"`
+ // Tests specifies whether the patterns should also return test packages.
+ Tests bool `json:"tests"`
+ // Overlay maps file paths (relative to the driver's working directory) to the byte contents
+ // of overlay files.
+ Overlay map[string][]byte `json:"overlay"`
+}
+
+// findExternalDriver returns the file path of a tool that supplies
+// the build system package structure, or "" if not found.
+// If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, words ...string) (*driverResponse, error) {
+ req, err := json.Marshal(driverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, words...)
+ cmd.Dir = cfg.Dir
+ cmd.Env = cfg.Env
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = stderr
+
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
+ }
+
+ var response driverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/golist.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 000000000000..6bb7168d2e34
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,1173 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ exec "golang.org/x/sys/execabs"
+ "golang.org/x/tools/go/internal/packagesdriver"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+// debug controls verbose logging.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+ error
+}
+
+// responseDeduper wraps a driverResponse, deduplicating its contents.
+type responseDeduper struct {
+ seenRoots map[string]bool
+ seenPackages map[string]*Package
+ dr *driverResponse
+}
+
+func newDeduper() *responseDeduper {
+ return &responseDeduper{
+ dr: &driverResponse{},
+ seenRoots: map[string]bool{},
+ seenPackages: map[string]*Package{},
+ }
+}
+
+// addAll fills in r with a driverResponse.
+func (r *responseDeduper) addAll(dr *driverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + // types.SizesFor always returns nil or a *types.StdSizes. + response.dr.Sizes, _ = sizes.(*types.StdSizes) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
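+ // For example (illustrative only), the patterns
+ // ["file=/tmp/m/main.go", "./..."] would leave
+ // containFiles = ["/tmp/m/main.go"] and restPatterns = ["./..."].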
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } + + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + return response.dr, nil +} + +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := state.createDriverResponse(pkgs...) + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return err + } + return state.addNeededOverlayPackages(response, needPkgs) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. 
+ fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. 
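+//
+// A heavily trimmed, illustrative element of the `go list -e -json` stream
+// that this struct decodes (real output carries many more fields):
+//
+//  {
+//    "ImportPath": "fmt",
+//    "Name": "fmt",
+//    "Dir": "/usr/local/go/src/fmt",
+//    "GoFiles": ["doc.go", "errors.go", "format.go", "print.go", "scan.go"],
+//    "Imports": ["errors", "io", "os", "strconv", "unicode/utf8"],
+//    "DepOnly": true
+//  }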
+type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + response := &driverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. 
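+ // E.g. a hypothetical "/home/user/go/src/example.com/m/sub" becomes
+ // "example.com/m/sub" when it lies under a known GOPATH or module root.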
+ if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
+ } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. 
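+ // For instance, a position like "main.go:1:2: expected 'package', found ident"
+ // (hypothetical) yields the filename "main.go", which is joined with cfg.Dir
+ // below when it is not absolute.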
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+ addFilenameFromPos := func(pos string) bool {
+ split := strings.Split(pos, ":")
+ if len(split) < 1 {
+ return false
+ }
+ filename := strings.TrimSpace(split[0])
+ if filename == "" {
+ return false
+ }
+ if !filepath.IsAbs(filename) {
+ filename = filepath.Join(state.cfg.Dir, filename)
+ }
+ info, _ := os.Stat(filename)
+ if info == nil {
+ return false
+ }
+ pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+ pkg.GoFiles = append(pkg.GoFiles, filename)
+ return true
+ }
+ found := addFilenameFromPos(err.Pos)
+ // In some cases, go list only reports the error position in the
+ // error text, not in the Pos field. One such case is when the
+ // file's package name is a keyword (see golang.org/issue/39763).
+ if !found {
+ addFilenameFromPos(err.Err)
+ }
+ }
+
+ if p.Error != nil {
+ msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+ // Address golang.org/issue/35964 by appending import stack to error message.
+ if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+ msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+ }
+ pkg.Errors = append(pkg.Errors, Error{
+ Pos: p.Error.Pos,
+ Msg: msg,
+ Kind: ListError,
+ })
+ }
+
+ pkgs[pkg.ID] = pkg
+ }
+
+ for id, errs := range additionalErrors {
+ if p, ok := pkgs[id]; ok {
+ p.Errors = append(p.Errors, errs...)
+ }
+ }
+ for _, pkg := range pkgs {
+ response.Packages = append(response.Packages, pkg)
+ }
+ sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+ return response, nil
+}
+
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+ if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+ return false
+ }
+
+ goV, err := state.getGoVersion()
+ if err != nil {
+ return false
+ }
+
+ // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+ // The import stack behaves differently for these versions than newer Go versions.
+ if goV < 15 {
+ return len(p.Error.ImportStack) == 0
+ }
+
+ // On Go 1.15 and later, only parse filenames out of the error if there's no import stack,
+ // or the current package is at the top of the import stack. This is not guaranteed
+ // to work perfectly, but should avoid some cases where files in errors don't belong to this
+ // package.
+ return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command.
+func (state *golistState) getGoVersion() (int, error) {
+ state.goVersionOnce.Do(func() {
+ state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+ })
+ return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return "", false, err
+ }
+ roots, err := state.determineRootDirs()
+ if err != nil {
+ return "", false, err
+ }
+
+ for rdir, rpath := range roots {
+ // Make sure that the directory is in the module,
+ // to avoid creating a path relative to another module.
+ if !strings.HasPrefix(absDir, rdir) {
+ continue
+ }
+ // TODO(matloob): This doesn't properly handle symlinks.
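+ // E.g. (hypothetically) rdir="/home/u/proj" with rpath="example.com/proj"
+ // and dir="/home/u/proj/internal/x" yields "example.com/proj/internal/x".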
+ r, err := filepath.Rel(rdir, dir)
+ if err != nil {
+ continue
+ }
+ if rpath != "" {
+ // We choose only one root even though the directory can belong to multiple
+ // modules or GOPATH entries. This is okay because we only need to work with absolute dirs when a
+ // file is missing from disk, for instance when gopls calls go/packages in an overlay.
+ // Once the file is saved, gopls, or the next invocation of the tool, will get the correct
+ // result straight from go list.
+ // TODO(matloob): Implement module tiebreaking?
+ return path.Join(rpath, filepath.ToSlash(r)), true, nil
+ }
+ return filepath.ToSlash(r), true, nil
+ }
+ return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+ for _, files := range fileses {
+ for _, file := range files {
+ if !filepath.IsAbs(file) {
+ file = filepath.Join(dir, file)
+ }
+ res = append(res, file)
+ }
+ }
+ return res
+}
+
+func jsonFlag(cfg *Config, goVersion int) string {
+ if goVersion < 19 {
+ return "-json"
+ }
+ var fields []string
+ added := make(map[string]bool)
+ addFields := func(fs ...string) {
+ for _, f := range fs {
+ if !added[f] {
+ added[f] = true
+ fields = append(fields, f)
+ }
+ }
+ }
+ addFields("Name", "ImportPath", "Error") // These fields are always needed
+ if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
+ addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
+ "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
+ "SwigFiles", "SwigCXXFiles", "SysoFiles")
+ if cfg.Tests {
+ addFields("TestGoFiles", "XTestGoFiles")
+ }
+ }
+ if cfg.Mode&NeedTypes != 0 {
+ // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
+ // even when -compiled isn't passed in.
+ // TODO(#52435): Should we make the test ask for -compiled, or automatically
+ // request CompiledGoFiles in certain circumstances?
+ addFields("Dir", "CompiledGoFiles")
+ }
+ if cfg.Mode&NeedCompiledGoFiles != 0 {
+ addFields("Dir", "CompiledGoFiles", "Export")
+ }
+ if cfg.Mode&NeedImports != 0 {
+ // When imports are requested, DepOnly is used to distinguish between packages
+ // explicitly requested and transitive imports of those packages.
+ addFields("DepOnly", "Imports", "ImportMap")
+ if cfg.Tests {
+ addFields("TestImports", "XTestImports")
+ }
+ }
+ if cfg.Mode&NeedDeps != 0 {
+ addFields("DepOnly")
+ }
+ if usesExportData(cfg) {
+ // Request Dir in the unlikely case Export is not absolute.
+ addFields("Dir", "Export")
+ }
+ if cfg.Mode&needInternalForTest != 0 {
+ addFields("ForTest")
+ }
+ if cfg.Mode&needInternalDepsErrors != 0 {
+ addFields("DepsErrors")
+ }
+ if cfg.Mode&NeedModule != 0 {
+ addFields("Module")
+ }
+ if cfg.Mode&NeedEmbedFiles != 0 {
+ addFields("EmbedFiles")
+ }
+ if cfg.Mode&NeedEmbedPatterns != 0 {
+ addFields("EmbedPatterns")
+ }
+ return "-json=" + strings.Join(fields, ",")
+}
+
+func golistargs(cfg *Config, words []string, goVersion int) []string {
+ const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+ fullargs := []string{
+ "-e", jsonFlag(cfg, goVersion),
+ fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
+ // go list doesn't let you pass -test and -find together,
+ // probably because you'd just get the TestMain.
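+ // -find therefore stays false whenever tests, imports, types, syntax,
+ // type info, or export data are requested.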
+ fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. +func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. 
+ msg := stderr.String()
+ for strings.HasPrefix(msg, "go: downloading") {
+ msg = msg[strings.IndexRune(msg, '\n')+1:]
+ }
+ if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
+ msg := msg[len("# "):]
+ if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
+ return stdout, nil
+ }
+ // Treat pkg-config errors as a special case (golang.org/issue/36770).
+ if strings.HasPrefix(msg, "pkg-config") {
+ return stdout, nil
+ }
+ }
+
+ // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
+ // the error in the Err section of stdout in case -e option is provided.
+ // This fix is provided for backwards compatibility.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Similar to the previous error, but currently lacks a fix in Go.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+ // If the package doesn't exist, put the absolute path of the directory into the error message,
+ // as Go 1.13 list does.
+ const noSuchDirectory = "no such directory"
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+ errstr := stderr.String()
+ abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ abspath, strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+ // Note that the error message we look for in this case is different from the one looked for above.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+ // directory outside any module.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Another variation of the previous error.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := ioutil.TempDir("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. + noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. 
+ filename = filepath.Join(dir, "overlay.json") + if err := ioutil.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 000000000000..9576b472f9cc --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,575 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + pkgOfDir := make(map[string][]*Package) + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + dir, err := commonDir(pkg.GoFiles) + if err != nil { + return nil, nil, err + } + if dir != "" { + pkgOfDir[dir] = append(pkgOfDir[dir], pkg) + } + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. 
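+ // For instance (illustrative), an overlay holding b_test.go, a.go, and
+ // a_test.go is processed in the order a.go, a_test.go, b_test.go.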
+ var overlayFiles []string
+ for opath := range state.cfg.Overlay {
+ overlayFiles = append(overlayFiles, opath)
+ }
+ sort.Slice(overlayFiles, func(i, j int) bool {
+ iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
+ jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
+ if iTest != jTest {
+ return !iTest // non-tests are before tests.
+ }
+ return overlayFiles[i] < overlayFiles[j]
+ })
+ for _, opath := range overlayFiles {
+ contents := state.cfg.Overlay[opath]
+ base := filepath.Base(opath)
+ dir := filepath.Dir(opath)
+ var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
+ var testVariantOf *Package // if opath is a test file, this is the package it is testing
+ var fileExists bool
+ isTestFile := strings.HasSuffix(opath, "_test.go")
+ pkgName, ok := extractPackageName(opath, contents)
+ if !ok {
+ // Don't bother adding a file that doesn't even have a parsable package statement
+ // to the overlay.
+ continue
+ }
+ // If all the files in the directory belong to a different package,
+ // rename that package to match the overlay file's package name.
+ maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir])
+ nextPackage:
+ for _, p := range response.dr.Packages {
+ if pkgName != p.Name && p.ID != "command-line-arguments" {
+ continue
+ }
+ for _, f := range p.GoFiles {
+ if !sameFile(filepath.Dir(f), dir) {
+ continue
+ }
+ // Make sure to capture information on the package's test variant, if needed.
+ if isTestFile && !hasTestFiles(p) {
+ // TODO(matloob): Are there packages other than the 'production' variant
+ // of a package that this can match? This shouldn't match the test main package
+ // because the file is generated in another directory.
+ testVariantOf = p
+ continue nextPackage
+ } else if !isTestFile && hasTestFiles(p) {
+ // We're examining a test variant, but the overlaid file is
+ // a non-test file. Because the overlay implementation
+ // (currently) only adds a file to one package, skip this
+ // package, so that we can add the file to the production
+ // variant of the package. (https://golang.org/issue/36857
+ // tracks handling overlays on both the production and test
+ // variant of a package).
+ continue nextPackage
+ }
+ if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
+ // We have already seen the production version of the package
+ // for which p is a test variant.
+ if hasTestFiles(p) {
+ testVariantOf = pkg
+ }
+ }
+ pkg = p
+ if filepath.Base(f) == base {
+ fileExists = true
+ }
+ }
+ }
+ // The overlay could have included an entirely new package or an
+ // ad-hoc package. An ad-hoc package is one that we have manually
+ // constructed from inadequate `go list` results for a file= query.
+ // It will have the ID command-line-arguments.
+ if pkg == nil || pkg.ID == "command-line-arguments" {
+ // Try to find the module or gopath dir the file is contained in.
+ // Then for modules, add the module path to the beginning.
+ pkgPath, ok, err := state.getPkgPath(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !ok {
+ break
+ }
+ var forTest string // only set for x tests
+ isXTest := strings.HasSuffix(pkgName, "_test")
+ if isXTest {
+ forTest = pkgPath
+ pkgPath += "_test"
+ }
+ id := pkgPath
+ if isTestFile {
+ if isXTest {
+ id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest)
+ } else {
+ id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
+ }
+ }
+ if pkg != nil {
+ // TODO(rstambler): We should change the package's path and ID
+ // here. The only issue is that this messes with the roots.
+ } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest + } + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err + } + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) + } + return state.resolveImport(sourceDir, id) + } + + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. + for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. 
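+// For example (illustrative, GOPATH mode): with sourceDir
+// "/gopath/src/a/b" and importPath "x/y", an existing directory
+// "/gopath/src/a/vendor/x/y" resolves to the ID "a/vendor/x/y".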
+func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. 
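+ // A go.mod directive such as (hypothetical)
+ // replace example.com/m => ../m
+ // produces a non-nil mod.Replace with a non-empty Path, so the
+ // replaced module's directory is added as a root as well.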
+ if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
+ roots[absDir] = mod.Path
+ }
+ }
+ i++
+ }
+ return roots, nil
+}
+
+func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
+ m := map[string]string{}
+ for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ m[filepath.Join(absDir, "src")] = ""
+ }
+ return m, nil
+}
+
+func extractImports(filename string, contents []byte) ([]string, error) {
+ f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
+ if err != nil {
+ return nil, err
+ }
+ var res []string
+ for _, imp := range f.Imports {
+ quotedPath := imp.Path.Value
+ path, err := strconv.Unquote(quotedPath)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, path)
+ }
+ return res, nil
+}
+
+// reclaimPackage attempts to reuse a package that failed to load in an overlay.
+//
+// If the package has errors and has no Name, GoFiles, or Imports,
+// then it's possible that it doesn't yet exist on disk.
+func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
+ // TODO(rstambler): Check the message of the actual error?
+ // It differs between $GOPATH and module mode.
+ if pkg.ID != id {
+ return false
+ }
+ if len(pkg.Errors) != 1 {
+ return false
+ }
+ if pkg.Name != "" || pkg.ExportFile != "" {
+ return false
+ }
+ if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
+ return false
+ }
+ if len(pkg.Imports) > 0 {
+ return false
+ }
+ pkgName, ok := extractPackageName(filename, contents)
+ if !ok {
+ return false
+ }
+ pkg.Name = pkgName
+ pkg.Errors = nil
+ return true
+}
+
+func extractPackageName(filename string, contents []byte) (string, bool) {
+ // TODO(rstambler): Check the message of the actual error?
+ // It differs between $GOPATH and module mode.
+ f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
+ if err != nil {
+ return "", false
+ }
+ return f.Name.Name, true
+}
+
+// commonDir returns the directory that all files are in, "" if files is empty,
+// or an error if they aren't in the same directory.
+func commonDir(files []string) (string, error) {
+ seen := make(map[string]bool)
+ for _, f := range files {
+ seen[filepath.Dir(f)] = true
+ }
+ if len(seen) > 1 {
+ return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen)
+ }
+ for k := range seen {
+ // seen has only one element; return it.
+ return k, nil
+ }
+ return "", nil // no files
+}
+
+// It is possible that the files in the disk directory dir have a different package
+// name from newName, which is deduced from the overlays. If they all share a single
+// package name and that name differs from newName, newName becomes the package
+// name: the packages in pkgsOfDir are renamed in place.
+func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) {
+ names := make(map[string]int)
+ for _, p := range pkgsOfDir {
+ names[p.Name]++
+ }
+ if len(names) != 1 {
+ // some files are in different packages
+ return
+ }
+ var oldName string
+ for k := range names {
+ oldName = k
+ }
+ if newName == oldName {
+ return
+ }
+ // We might have a case where all of the package names in the directory are
+ // the same, but the overlay file is for an x test, which belongs to its
+ // own package.
If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { + p.Name = newName + } +} + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. + + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. 
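+// For example (illustrative), replaceVendor("a/vendor/b/vendor", repl) with
+// repl = "\x00" yields "a/\x00/b/vendor": the trailing "vendor" element is
+// left untouched.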
+func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 000000000000..5c080d21b54a --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/packages.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 000000000000..0f1505b808a6 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1326 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. 
+ NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. 
+ BuildFlags []string
+
+ // modFile will be used for -modfile in go command invocations.
+ modFile string
+
+ // modFlag will be used for -modfile in go command invocations.
+ modFlag string
+
+ // Fset provides source position information for syntax trees and types.
+ // If Fset is nil, Load will use a new fileset, but preserve Fset's value.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+ // If ParseFile is nil, the loader will use parser.ParseFile.
+ //
+ // ParseFile should parse the source from src and use filename only for
+ // recording position information.
+ //
+ // An application may supply a custom implementation of ParseFile
+ // to change the effective file contents or the behavior of the parser,
+ // or to modify the syntax tree. For example, selectively eliminating
+ // unwanted function bodies can significantly accelerate type checking.
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // Overlay provides a mapping of absolute file paths to file contents.
+ // If the file with the given path already exists, the parser will use the
+ // alternative file contents provided by the map.
+ //
+ // Overlays provide incomplete support for when a given file doesn't
+ // already exist on disk. See the package doc above for more details.
+ Overlay map[string][]byte
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
+
+// driverResponse contains the results for a driver query.
+type driverResponse struct {
+ // NotHandled is returned if the request can't be handled by the current
+ // driver. If an external driver returns a response with NotHandled, the
+ // rest of the driverResponse is ignored, and go/packages will fall back
+ // to the next driver. If go/packages is extended in the future to support
+ // lists of multiple drivers, go/packages will fall back to the next driver.
+ NotHandled bool
+
+ // Sizes, if not nil, is the types.Sizes to use when type checking.
+ Sizes *types.StdSizes
+
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+ // The Imports, if populated, will be stubs that only have their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. 
+ IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. +type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) +} + +// An Error describes a problem with a package's metadata, syntax, or types. 
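+// For example (illustrative), a type-checker diagnostic may be reported as
+// Error{Pos: "a.go:4:2", Msg: "undefined: x", Kind: TypeError}.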
+type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
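+// A minimal accepted document looks like (illustrative):
+// {"ID": "fmt", "Name": "fmt", "PkgPath": "fmt", "Imports": {"errors": "errors"}}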
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. 
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, src, mode)
+ }
+ }
+ }
+
+ return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
+ roots := response.Roots
+ rootMap := make(map[string]int, len(roots))
+ for i, root := range roots {
+ rootMap[root] = i
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // First pass: fix up and build the map and roots.
+ var initial = make([]*loaderPackage, len(roots))
+ for _, pkg := range response.Packages {
+ rootIndex := -1
+ if i, found := rootMap[pkg.ID]; found {
+ rootIndex = i
+ }
+
+ // Overlays can invalidate export data.
+ // TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+ exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+ // This package needs type information if the caller requested types and the package is
+ // either a root, or it's a non-root and the user requested dependencies ...
+ needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+ // This package needs source if the caller requested source (or types info, which implies source)
+ // and the package is either a root, or it's a non-root and the user requested dependencies ...
+ needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+ // ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+ // typechecking packages from source if they fail to compile.
+ (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needtypes: needtypes,
+ needsrc: needsrc,
+ goVersion: response.GoVersion,
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if rootIndex >= 0 {
+ initial[rootIndex] = lpkg
+ lpkg.initial = true
+ }
+ }
+ for i, root := range roots {
+ if initial[i] == nil {
+ return nil, fmt.Errorf("root package %v is missing", root)
+ }
+ }
+
+ // Materialize the import graph.
+
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+ // and materializes the graph as Packages.Imports.
+ //
+ // Valid imports are saved in the Packages.Imports map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does. These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(lpkg *loaderPackage) bool
+ var srcPkgs []*loaderPackage
+ visit = func(lpkg *loaderPackage) bool {
+ switch lpkg.color {
+ case black:
+ return lpkg.needsrc
+ case grey:
+ panic("internal error: grey node")
+ }
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+ stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+ // If NeedImports isn't set, the imports fields will all be zeroed out.
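+ // (The actual zeroing of unrequested fields happens in the clean-up
+ // pass at the end of refine, below.)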
+ if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. 
+ var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? 
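+ // (Illustrative: for runtime.Version() == "go1.19.5", the Sscanf below
+ // yields runtimeVersion == 19; non-release versions such as
+ // "devel +abc123" fail the scan and the diagnostic is skipped.)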
+ var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + typeparams.InitInstanceInfo(lpkg.TypesInfo) + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. 
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+ appendError(err)
+ break outer
+ }
+ }
+ }
+ }
+
+ // Record accumulated errors.
+ illTyped := len(lpkg.Errors) > 0
+ if !illTyped {
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ illTyped = true
+ break
+ }
+ }
+ }
+ lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+ ld.parseCacheMu.Lock()
+ v, ok := ld.parseCache[filename]
+ if ok {
+ // cache hit
+ ld.parseCacheMu.Unlock()
+ <-v.ready
+ } else {
+ // cache miss
+ v = &parseValue{ready: make(chan struct{})}
+ ld.parseCache[filename] = v
+ ld.parseCacheMu.Unlock()
+
+ var src []byte
+ for f, contents := range ld.Config.Overlay {
+ if sameFile(f, filename) {
+ src = contents
+ }
+ }
+ var err error
+ if src == nil {
+ ioLimit <- true // wait
+ src, err = ioutil.ReadFile(filename)
+ <-ioLimit // signal
+ }
+ if err != nil {
+ v.err = err
+ } else {
+ v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+ }
+
+ close(v.ready)
+ }
+ return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var wg sync.WaitGroup
+ n := len(filenames)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range filenames {
+ if ld.Config.Context.Err() != nil {
+ parsed[i] = nil
+ errors[i] = ld.Config.Context.Err()
+ continue
+ }
+ wg.Add(1)
+ go func(i int, filename string) {
+ parsed[i], errors[i] = ld.parseFile(filename)
+ wg.Done()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+func sameFile(x, y string) bool {
+ if x == y {
+ // It could be the case that y doesn't exist.
+ // For instance, it may be an overlay file that
+ // hasn't been written to disk. To handle that case
+ // let x == y through. (We added the exact absolute path
+ // string to the CompiledGoFiles list, so the unwritten
+ // overlay case implies x==y.)
+ return true
+ }
+ if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
+
+// loadFromExportData ensures that type information is present for the specified
+// package, loading it from an export data file on the first request.
+// On success it sets lpkg.Types to a new Package.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + return nil +} + +// impliedLoadMode returns loadMode with its dependencies. 
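+// For example (illustrative), impliedLoadMode(NeedTypesInfo) returns
+// NeedTypesInfo|NeedImports, because type information cannot be computed
+// without the import graph.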
+func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/visit.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 000000000000..a1dcc40b7270 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/cluster-autoscaler/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 000000000000..be8f5a867e65 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,762 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. 
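+// (Illustratively, X may be reached both as "A.UF0" and as "B.UF0",
+// using the encoding described below.)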
+// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/typeparams" + + _ "unsafe" // for go:linkname +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. 
+// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func For(obj types.Object) (Path, error) { + return newEncoderFor()(obj) +} + +// An encoder amortizes the cost of encoding the paths of multiple objects. +// Nonexported pending approval of proposal 58668. +type encoder struct { + scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() +} + +// Exposed to gopls via golang.org/x/tools/internal/typesinternal +// pending approval of proposal 58668. +// +//go:linkname newEncoderFor +func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } + +func (enc *encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. 
+ return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + names := enc.scopeNames(scope) + for _, name := range names { + o := scope.Lookup(name) + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, name...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, name := range names { + o := scope.Lookup(name) + path := append(empty, name...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. + for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. 
We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. 
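+ // (Basic types contain no objects at all, so they too yield no path.)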
+ return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *typeparams.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *typeparams.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. 
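+ // (Illustrative walkthrough: decoding "T.UM0.RA1.F0" from the package
+ // documentation looks up "T", then applies .Type().Underlying().Method(0),
+ // .Type().Results().At(1), and finally .Type().Field(0).)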
+ var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*typeparams.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + methods := namedMethods(t) // (unmemoized) + if index >= len(methods) { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) + } + obj = methods[index] // Id-ordered + + 
default:
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
+ }
+ t = nil
+
+ case opObj:
+ hasObj, ok := t.(hasObj)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
+ }
+ obj = hasObj.Obj()
+ t = nil
+
+ default:
+ return nil, fmt.Errorf("invalid path: unknown code %q", code)
+ }
+ }
+
+ if obj.Pkg() != pkg {
+ return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
+ }
+
+ return obj, nil // success
+}
+
+// namedMethods returns the methods of a Named type in ascending Id order.
+func namedMethods(named *types.Named) []*types.Func {
+ methods := make([]*types.Func, named.NumMethods())
+ for i := range methods {
+ methods[i] = named.Method(i)
+ }
+ sort.Slice(methods, func(i, j int) bool {
+ return methods[i].Id() < methods[j].Id()
+ })
+ return methods
+}
+
+// scopeNames is a memoization of scope.Names. Callers must not modify the result.
+func (enc *encoder) scopeNames(scope *types.Scope) []string {
+ m := enc.scopeNamesMemo
+ if m == nil {
+ m = make(map[*types.Scope][]string)
+ enc.scopeNamesMemo = m
+ }
+ names, ok := m[scope]
+ if !ok {
+ names = scope.Names() // allocates and sorts
+ m[scope] = names
+ }
+ return names
+}
+
+// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
+func (enc *encoder) namedMethods(named *types.Named) []*types.Func {
+ m := enc.namedMethodsMemo
+ if m == nil {
+ m = make(map[*types.Named][]*types.Func)
+ enc.namedMethodsMemo = m
+ }
+ methods, ok := m[named]
+ if !ok {
+ methods = namedMethods(named) // allocates and sorts
+ m[named] = methods
+ }
+ return methods
+
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/event.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/event.go
new file mode 100644
index 000000000000..a6cf0e64a4b1
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -0,0 +1,85 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package core provides support for event based telemetry.
+package core
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Event holds the information about an event of note that occurred.
+type Event struct {
+ at time.Time
+
+ // As events are often on the stack, storing the first few labels directly
+ // in the event can avoid an allocation at all for the very common cases of
+ // simple events.
+ // The length needs to be large enough to cope with the majority of events
+ // but not so large as to cause undue stack pressure.
+ // A log message with two values will use 3 labels (one for each value and
+ // one for the message itself).
+
+ static [3]label.Label // inline storage for the first few labels
+ dynamic []label.Label // dynamically sized storage for remaining labels
+}
+
+// eventLabelMap implements label.Map for the labels of an Event.
+type eventLabelMap struct {
+ event Event
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+ if !ev.at.IsZero() {
+ fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+ }
+ for index := 0; ev.Valid(index); index++ {
+ if l := ev.Label(index); l.Valid() {
+ fmt.Fprintf(f, "\n\t%v", l)
+ }
+ }
+}
+
+func (ev Event) Valid(index int) bool {
+ return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+ if index < len(ev.static) {
+ return ev.static[index]
+ }
+ return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+ for _, l := range ev.static {
+ if l.Key() == key {
+ return l
+ }
+ }
+ for _, l := range ev.dynamic {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+ return Event{
+ static: static,
+ dynamic: labels,
+ }
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+ ev.at = at
+ return ev
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/export.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 000000000000..05f3a9a5791a
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+ exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ p := unsafe.Pointer(&e)
+ if e == nil {
+ // &e is always valid, and so p is always valid, but for the early abort
+ // of ProcessEvent to be efficient it needs to make the nil check on the
+ // pointer without having to dereference it, so we make the nil function
+ // also a nil pointer
+ p = nil
+ }
+ atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// It will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+ // add the current time to the event
+ ev.at = time.Now()
+ // hand the event off to the current exporter
+ return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx
+ }
+ return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
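+//
+// A minimal sketch of the intended pairing (illustrative; beginEvent and
+// endEvent are hypothetical Events built with MakeEvent):
+//
+// ctx, end := core.ExportPair(ctx, beginEvent, endEvent)
+// defer end()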
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx, func() {}
+ }
+ ctx = deliver(ctx, *exporterPtr, begin)
+ return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/fast.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 000000000000..06c1d4615e6b
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ }, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Metric1 sends a label event to the exporter with the supplied labels.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ }, nil))
+}
+
+// Metric2 sends a label event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Start1 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// Start2 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ t2,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/doc.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/doc.go
new file mode 100644
index 000000000000..5dc6e6babedd
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package event provides a set of packages that cover the main
+// concepts of telemetry in an implementation agnostic way.
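+//
+// A minimal usage sketch (illustrative; assumes an exporter has been
+// registered via event.SetExporter, and uses an untyped key from the
+// keys package):
+//
+// detail := keys.New("detail", "illustrative untyped key")
+// ctx, done := event.Start(ctx, "step")
+// defer done()
+// event.Log(ctx, "working", detail.Of(42))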
+package event diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/event.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 000000000000..4d55e577d1a8 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. 
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/keys/keys.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 000000000000..a02206e30150 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. +func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. 
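+//
+// For example (illustrative), a subsystem can mark all of its events with a
+// hypothetical tag and attach it via event.Label:
+//
+// var myTag = keys.NewTag("mysubsystem", "events from my subsystem")
+// ctx = event.Label(ctx, myTag.New())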
+func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. 
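+//
+// A minimal sketch (hypothetical key name):
+//
+//	var retries = NewInt32("retries", "how many times the call was retried")
+//	l := retries.Of(3) // retries.From(l) == 3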
+func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. +func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. 
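+// The value is stored by packing its IEEE-754 bit pattern into the label's
+// uint64 via math.Float32bits; From reverses this with math.Float32frombits.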
+func (k *Float32) Of(v float32) label.Label {
+	return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+	return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+	name        string
+	description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+	return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string        { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+	return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+	return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+	name        string
+	description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+	return &String{name: name, description: description}
+}
+
+func (k *String) Name() string        { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+	name        string
+	description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+	return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string        { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+	if v {
+		return label.Of64(k, 1)
+	}
+	return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+	name        string
+	description string
+}
+
+// NewError creates a new Key for error values.
+func NewError(name, description string) *Error {
+	return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string        { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+	io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+	err, _ := t.UnpackValue().(error)
+	return err
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/keys/standard.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 000000000000..7e9586659213
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+	// Msg is a key used to add message strings to label lists.
+	Msg = NewString("message", "a readable message")
+	// Label is a key used to indicate an event adds labels to the context.
+	Label = NewTag("label", "a label context marker")
+	// Start is used for things like traces that have a name.
+	Start = NewString("start", "span start")
+	// End is a key used to mark the end of a span.
+	End = NewTag("end", "a span end marker")
+	// Detach is a key used to mark an event that detaches from the enclosing span.
+	Detach = NewTag("detach", "a span detach marker")
+	// Err is a key used to add error values to label lists.
+	Err = NewError("error", "an error that occurred")
+	// Metric is a key used to indicate an event records metrics.
+	Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/label/label.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 000000000000..0f526e1f9ab4
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only; the name should be unique
+// for communicating with external systems, but it is not required or enforced.
+type Key interface {
+	// Name returns the key name.
+	Name() string
+	// Description returns a string that can be used to describe the value.
+	Description() string
+
+	// Format is used in formatting to append the value of the label to the
+	// supplied buffer.
+	// The formatter may use the supplied buf as a scratch area to avoid
+	// allocations.
+	Format(w io.Writer, buf []byte, l Label)
+}
+
+// Label holds a key and value pair.
+// It is normally used when passing around lists of labels.
+type Label struct {
+	key     Key
+	packed  uint64
+	untyped interface{}
+}
+
+// Map is the interface to a collection of Labels indexed by key.
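+// Lookup is by Key identity: keys are compared by pointer, not by name, so
+// two distinct keys that share a name do not collide.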
+type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() interface{} { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +type stringptr unsafe.Pointer + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: stringptr(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(stringptr)) + hdr.Len = int(t.packed) + return v +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. 
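+// It writes name=value, delegating the value rendering to the key's own
+// Format method; a hypothetical String key "msg" would print as msg="hi".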
+func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/bexport.go new file mode 100644 index 000000000000..30582ed6d3d7 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// Current export format version. Increase with each format change. +// +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. 
+//
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
+// 4: type name objects support type aliases, uses aliasTag
+// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 4

+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+type exporter struct {
+	fset *token.FileSet
+	out  bytes.Buffer
+
+	// object -> index maps, indexed in order of serialization
+	strIndex map[string]int
+	pkgIndex map[*types.Package]int
+	typIndex map[types.Type]int
+
+	// position encoding
+	posInfoFormat bool
+	prevFile      string
+	prevLine      int
+
+	// debugging support
+	written int // bytes written
+	indent  int // for trace
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+	return internalError(fmt.Sprintf(format, args...))
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+	if !debug {
+		defer func() {
+			if e := recover(); e != nil {
+				if ierr, ok := e.(internalError); ok {
+					err = ierr
+					return
+				}
+				// Not an internal error; panic again.
+				panic(e)
+			}
+		}()
+	}
+
+	p := exporter{
+		fset:          fset,
+		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
+		pkgIndex:      make(map[*types.Package]int),
+		typIndex:      make(map[types.Type]int),
+		posInfoFormat: true, // TODO(gri) might become a flag, eventually
+	}
+
+	// write version info
+	// The version string must start with "version %d" where %d is the version
+	// number. Additional debugging information may follow after a blank; that
+	// text is ignored by the importer.
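+	// For example, with debugFormat enabled the stream begins with the bytes
+	// "version 4\n" followed by "debug\n"; with it disabled (the default),
+	// the second line is empty.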
+ p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !token.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". 
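+		// For example (hypothetical paths): after emitting "pkg/a.go", the
+		// file "pkg/b.go" is encoded as prefix length 4 plus suffix "b.go".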
+ n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. + + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). 
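+	// (methodsByName orders by name, so the emitted export data does not
+	// depend on the declaration order of the methods.)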
+ var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. + p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !token.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if token.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" 
to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !token.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
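+	// For example, a hypothetical value 0x0102 arrives as the little-endian
+	// bytes {0x02, 0x01} and must become {0x01, 0x02} before big.Int.SetBytes,
+	// which expects big-endian input.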
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". 
") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 000000000000..b85de0147001 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,1053 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). 
+ // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
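+	// (The append above copies the slice so that the sort below does not
+	// reorder p.pkgList itself.)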
+	sort.Sort(byPath(list))
+	pkg.SetImports(list)
+
+	// package was imported completely and without errors
+	pkg.MarkComplete()
+
+	return p.read, pkg, nil
+}
+
+func errorf(format string, args ...interface{}) {
+	panic(fmt.Sprintf(format, args...))
+}
+
+func (p *importer) pkg() *types.Package {
+	// if the package was seen before, i is its index (>= 0)
+	i := p.tagOrIndex()
+	if i >= 0 {
+		return p.pkgList[i]
+	}
+
+	// otherwise, i is the package tag (< 0)
+	if i != packageTag {
+		errorf("unexpected package tag %d version %d", i, p.version)
+	}
+
+	// read package data
+	name := p.string()
+	var path string
+	if p.version >= 5 {
+		path = p.path()
+	} else {
+		path = p.string()
+	}
+	if p.version >= 6 {
+		p.int() // package height; unused by go/types
+	}
+
+	// we should never see an empty package name
+	if name == "" {
+		errorf("empty package name in import")
+	}
+
+	// an empty path denotes the package we are currently importing;
+	// it must be the first package we see
+	if (path == "") != (len(p.pkgList) == 0) {
+		errorf("package path %q for pkg index %d", path, len(p.pkgList))
+	}
+
+	// if the package was imported before, use that one; otherwise create a new one
+	if path == "" {
+		path = p.importpath
+	}
+	pkg := p.imports[path]
+	if pkg == nil {
+		pkg = types.NewPackage(path, name)
+		p.imports[path] = pkg
+	} else if pkg.Name() != name {
+		errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
+	}
+	p.pkgList = append(p.pkgList, pkg)
+
+	return pkg
+}
+
+// objTag returns the tag value for each object kind.
+func objTag(obj types.Object) int {
+	switch obj.(type) {
+	case *types.Const:
+		return constTag
+	case *types.TypeName:
+		return typeTag
+	case *types.Var:
+		return varTag
+	case *types.Func:
+		return funcTag
+	default:
+		errorf("unexpected object: %v (%T)", obj, obj) // panics
+		panic("unreachable")
+	}
+}
+
+func sameObj(a, b types.Object) bool {
+	// Because unnamed types are not canonicalized, we cannot simply compare types for
+	// (pointer) identity.
+	// Ideally we'd check equality of constant values as well, but this is good enough.
+	return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
+}
+
+func (p *importer) declare(obj types.Object) {
+	pkg := obj.Pkg()
+	if alt := pkg.Scope().Insert(obj); alt != nil {
+		// This can only trigger if we import a (non-type) object a second time.
+		// Excluding type aliases, this cannot happen because 1) we only import a package
+		// once; and 2) we ignore compiler-specific export data which may contain
+		// functions whose inlined function bodies refer to other functions that
+		// were already imported.
+		// However, type aliases require reexporting the original type, so we need
+		// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
+		// method importer.obj, switch case importing functions).
+		// TODO(gri) review/update this comment once the gc compiler handles type aliases.
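+		// For the reasons above, a redeclaration is tolerated as long as the
+		// new object looks the same as the old one (same kind and identical
+		// type, as checked by sameObj).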
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. + f := s.files[file] + if f == nil { + f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} + s.files[file] = f + } + if line > maxlines { + line = 1 + } + if line > f.lastline { + f.lastline = line + } + + // Return a fake position assuming that f.file consists only of newlines. + return token.Pos(f.file.Base() + line - 1) +} + +func (s *fakeFileSet) setLines() { + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + for _, f := range s.files { + f.file.SetLines(fakeLines[:f.lastline]) + } +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. 
The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. +func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
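+		// Remember the reserved slot's index; it is patched below
+		// (p.typList[n] = t) once the interface has been constructed.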
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
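`rawByte` above undoes the writer's escaping, in which the archive-significant bytes `'$'` and `'|'` are written as `"|S"` and `"||"`. A slice-at-a-time sketch of the same unescaping (the real decoder works byte by byte against `p.data`):

```go
package main

import "fmt"

// unescape reverses the export format's byte escaping: "|S" becomes
// '$' and "||" becomes '|'; any other byte after '|' is an error.
func unescape(data []byte) ([]byte, error) {
	var out []byte
	for i := 0; i < len(data); i++ {
		b := data[i]
		if b == '|' {
			i++
			if i == len(data) {
				return nil, fmt.Errorf("truncated escape sequence")
			}
			switch data[i] {
			case 'S':
				b = '$'
			case '|':
				b = '|'
			default:
				return nil, fmt.Errorf("unexpected escape sequence %q", data[i])
			}
		}
		out = append(out, b)
	}
	return out, nil
}

func main() {
	out, err := unescape([]byte("a|Sb||c"))
	fmt.Printf("%s %v\n", out, err) // a$b|c <nil>
}
```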
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go new file mode 100644 index 000000000000..f6437feb1cfd --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -0,0 +1,99 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. 
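For reference, the header length computed just below is the classic Unix `ar` entry header: name(16) + mtime(12) + uid(6) + gid(6) + mode(8) + size(10) + magic(2) = 60 bytes, where the trailing magic is `` `\n ``. A sketch of decoding the two fields the importer cares about (`parseArHeader` is an invented helper mirroring `readGopackHeader`):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseArHeader pulls the entry name and size out of a 60-byte Unix
// archive header laid out as:
//
//	name[16] mtime[12] uid[6] gid[6] mode[8] size[10] "`\n"
func parseArHeader(hdr []byte) (name string, size int64, err error) {
	if len(hdr) != 60 || hdr[58] != '`' || hdr[59] != '\n' {
		return "", 0, fmt.Errorf("invalid archive header")
	}
	name = strings.TrimSpace(string(hdr[:16]))
	size, err = strconv.ParseInt(strings.TrimSpace(string(hdr[48:58])), 10, 64)
	return name, size, err
}

func main() {
	hdr := fmt.Sprintf("%-16s%-12d%-6d%-6d%-8s%-10d`\n", "__.PKGDEF", 0, 0, 0, "644", 42)
	fmt.Println(parseArHeader([]byte(hdr))) // __.PKGDEF 42 <nil>
}
```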
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 + } + + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 000000000000..a973dece9360 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,277 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. 
Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
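`lookupGorootExport` above uses a once-per-key memoization shape worth calling out: the `sync.Map` stores a closure, and a `sync.Once` captured by that closure guarantees the expensive `go list` runs at most once per directory even under concurrent callers. The same shape in isolation, with an invented `compute` standing in for the `go list` invocation:

```go
package main

import (
	"fmt"
	"sync"
)

// memoize returns a concurrency-safe, once-per-key cache around
// compute: the map stores thunks, and only the stored thunk's Once
// ever fires, so losers of the LoadOrStore race share the winner's
// result.
func memoize(compute func(key string) string) func(string) string {
	var m sync.Map // key → func() string
	return func(key string) string {
		f, ok := m.Load(key)
		if !ok {
			var (
				once   sync.Once
				result string
			)
			f, _ = m.LoadOrStore(key, func() string {
				once.Do(func() { result = compute(key) })
				return result
			})
		}
		return f.(func() string)()
	}
}

func main() {
	calls := 0
	lookup := memoize(func(k string) string { calls++; return "export:" + k })
	fmt.Println(lookup("fmt"), lookup("fmt"), calls) // export:fmt export:fmt 1
}
```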
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. 
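The dispatch that follows keys off the first byte of the payload after the `$$B\n` header. A toy classifier showing the same mapping, with the corresponding importer entry points named in the labels:

```go
package main

import "fmt"

// exportDataKind labels a "$$B" payload the way Import dispatches it:
// 'i' selects the indexed decoder, 'v'/'c'/'d' the old binary one,
// and 'u' the unified format.
func exportDataKind(data []byte) string {
	if len(data) == 0 {
		return "empty"
	}
	switch data[0] {
	case 'i':
		return "indexed (IImportData)"
	case 'v', 'c', 'd':
		return "binary (BImportData)"
	case 'u':
		return "unified (UImportData)"
	default:
		return fmt.Sprintf("unknown prefix %q", data[0])
	}
}

func main() {
	fmt.Println(exportDataKind([]byte("i..."))) // indexed (IImportData)
	fmt.Println(exportDataKind([]byte("u..."))) // unified (UImportData)
}
```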
+ if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/iexport.go new file mode 100644 index 000000000000..a0dc0b5e27dd --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -0,0 +1,1180 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/tokeninternal" + "golang.org/x/tools/internal/typeparams" +) + +// IExportShallow encodes "shallow" export data for the specified package. +// +// No promises are made about the encoding other than that it can be +// decoded by the same version of IIExportShallow. If you plan to save +// export data in the file system, be sure to include a cryptographic +// digest of the executable in the key to avoid version skew. +func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { + // In principle this operation can only fail if out.Write fails, + // but that's impossible for bytes.Buffer---and as a matter of + // fact iexportCommon doesn't even check for I/O errors. + // TODO(adonovan): handle I/O errors properly. + // TODO(adonovan): use byte slices throughout, avoiding copying. + const bundle, shallow = false, true + var out bytes.Buffer + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return out.Bytes(), err +} + +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. +func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { + const bundle = false + pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// InsertType is the type of a function that creates a types.TypeName +// object for a named type and inserts it into the scope of the +// specified Package. +type InsertType = func(pkg *types.Package, name string) + +// Current bundled export format version. 
Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. + var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(uint64(p.version)) + hdr.uint64(uint64(p.strings.Len())) + if p.shallow { + hdr.uint64(uint64(files.Len())) + hdr.uint64(uint64(len(fileOffset))) + for _, offset := range fileOffset { + hdr.uint64(offset) + } + } + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + if p.shallow { + io.Copy(out, &files) + } + io.Copy(out, &p.data0) + + return nil +} + +// encodeFile writes to w a representation of the file sufficient to +// faithfully restore position information about all needed offsets. +// Mutates the needed array. 
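`IExportData` and `IImportData` are surfaced publicly through `golang.org/x/tools/go/gcexportdata`. A minimal round-trip sketch using that wrapper, type-checking a one-constant package small enough to need no importer:

```go
package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", `package p; const K = 42`, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config // no Importer needed: p imports nothing
	pkg, err := conf.Check("example.com/p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil { // IExportData underneath
		panic(err)
	}

	imports := make(map[string]*types.Package)
	pkg2, err := gcexportdata.Read(&buf, token.NewFileSet(), imports, "example.com/p")
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg2.Path(), pkg2.Scope().Lookup("K") != nil) // example.com/p true
}
```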
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { + _ = needed[0] // precondition: needed is non-empty + + w.uint64(p.stringOff(file.Name())) + + size := uint64(file.Size()) + w.uint64(size) + + // Sort the set of needed offsets. Duplicates are harmless. + sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + + lines := tokeninternal.GetLines(file) // byte offset of each line start + w.uint64(uint64(len(lines))) + + // Rather than record the entire array of line start offsets, + // we save only a sparse list of (index, offset) pairs for + // the start of each line that contains a needed position. + var sparse [][2]int // (index, offset) pairs +outer: + for i, lineStart := range lines { + lineEnd := size + if i < len(lines)-1 { + lineEnd = uint64(lines[i+1]) + } + // Does this line contains a needed offset? + if needed[0] < lineEnd { + sparse = append(sparse, [2]int{i, lineStart}) + for needed[0] < lineEnd { + needed = needed[1:] + if len(needed) == 0 { + break outer + } + } + } + } + + // Delta-encode the columns. + w.uint64(uint64(len(sparse))) + var prev [2]int + for _, pair := range sparse { + w.uint64(uint64(pair[0] - prev[0])) + w.uint64(uint64(pair[1] - prev[1])) + prev = pair + } +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + type pkgObj struct { + obj types.Object + name string // qualified name; differs from obj.Name for type params + } + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]pkgObj{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + name := w.p.exportName(obj) + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].name < objs[j].name + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.name) + w.uint64(index[obj.obj]) + } + } +} + +// exportName returns the 'exported' name of an object. It differs from +// obj.Name() only for type parameters (see tparamExportName for details). +func (p *iexporter) exportName(obj types.Object) (res string) { + if name := p.tparamNames[obj]; name != "" { + return name + } + return obj.Name() +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + version int + + shallow bool // don't put types from other packages in the index + localpkg *types.Package // (nil in bundle mode) + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. 
+ allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + // In shallow mode, object positions are encoded as (file, offset). + // Each file is recorded as a line-number table. + // Only the lines of needed positions are saved faithfully. + fileInfo map[*token.File]uint64 // value is index in fileInfos + fileInfos []*filePositions + + data0 intWriter + declIndex map[types.Object]uint64 + tparamNames map[types.Object]string // typeparam->exported name + typIndex map[types.Type]uint64 + + indent int // for tracing support +} + +type filePositions struct { + file *token.File + needed []uint64 // unordered list of needed file offsets +} + +func (p *iexporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. +func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { + index, ok := p.fileInfo[file] + if !ok { + index = uint64(len(p.fileInfo)) + p.fileInfos = append(p.fileInfos, &filePositions{file: file}) + if p.fileInfo == nil { + p.fileInfo = make(map[*token.File]uint64) + } + p.fileInfo[file] = index + } + // Record each needed offset. + info := p.fileInfos[index] + offset := uint64(file.Offset(pos)) + info.needed = append(info.needed, offset) + + return index, offset +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + // Caller should not ask us to do export it. + if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } + + // Shallow export data: don't index decls from other packages. + if p.shallow && obj.Pkg() != p.localpkg { + return + } + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark obj present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + if trace { + p.trace("exporting decl %v (%T)", obj, obj) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", obj) + }() + } + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + // We shouldn't see methods in the package scope, + // but the type checker may repair "func () F() {}" + // to "func (Invalid) F()" and then treat it like "func F()", + // so allow that. See golang/go#57729. 
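`stringOff` above implements the string section: each distinct string is appended once, uvarint-length-prefixed, and everything else refers to it purely by byte offset. A standalone sketch of the writer and its matching reader (`stringTable` is invented; the real reader is `stringAt` in iimport.go):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// stringTable interns strings into a flat byte section and hands out
// offsets; at decodes an offset back into the string.
type stringTable struct {
	data  []byte
	index map[string]uint64
}

func (s *stringTable) off(str string) uint64 {
	if off, ok := s.index[str]; ok {
		return off // interned: reuse the existing offset
	}
	off := uint64(len(s.data))
	s.index[str] = off
	s.data = binary.AppendUvarint(s.data, uint64(len(str)))
	s.data = append(s.data, str...)
	return off
}

func (s *stringTable) at(off uint64) string {
	n, w := binary.Uvarint(s.data[off:])
	return string(s.data[off+uint64(w) : off+uint64(w)+n])
}

func main() {
	s := &stringTable{index: map[string]uint64{}}
	a, b, c := s.off("hello"), s.off("world"), s.off("hello")
	fmt.Println(a, b, c, s.at(b)) // 0 6 0 world
}
```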
+ if sig.Recv().Type() != types.Typ[types.Invalid] { + panic(internalErrorf("unexpected method: %v", sig)) + } + } + + // Function. + if typeparams.ForSignature(sig).Len() == 0 { + w.tag('F') + } else { + w.tag('G') + } + w.pos(obj.Pos()) + // The tparam list of the function type is the declaration of the type + // params. So, write out the type params right now. Then those type params + // will be referenced via their type offset (via typOff) in all other + // places in the signature and function where they are used. + // + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := t.(*typeparams.TypeParam); ok { + w.tag('P') + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := constraint.(*types.Interface); iface != nil { + implicit = typeparams.IsImplicit(iface) + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(t, obj.Pkg()) + break + } + + // Defined type. + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if typeparams.ForNamed(named).Len() == 0 { + w.tag('T') + } else { + w.tag('U') + } + w.pos(obj.Pos()) + + if typeparams.ForNamed(named).Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + } + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. + if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. 
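The posV2 encoding in isolation: `token.NoPos` becomes a single 0, and any real position becomes the pair (1+fileIndex, byteOffset), so file index 0 stays distinguishable from "no position". A sketch over plain uint64 slices standing in for the varint stream:

```go
package main

import "fmt"

// encodePos/decodePos mirror posV2's shallow scheme: a lone 0 means
// NoPos; otherwise the stream carries (1+fileIndex, offset).
func encodePos(fileIndex, offset uint64, ok bool) []uint64 {
	if !ok { // token.NoPos
		return []uint64{0}
	}
	return []uint64{1 + fileIndex, offset}
}

func decodePos(stream []uint64) (fileIndex, offset uint64, ok bool) {
	if stream[0] == 0 {
		return 0, 0, false
	}
	return stream[0] - 1, stream[1], true
}

func main() {
	fmt.Println(decodePos(encodePos(3, 128, true))) // 3 128 true
	fmt.Println(decodePos(encodePos(0, 0, false)))  // 0 0 false
}
```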
+func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *types.Named: + if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. 
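posV1 above steals the low bit of each signed delta as a continuation flag: the column delta's low bit says a line delta follows, and the line delta's low bit says a file name follows. A sketch of the writer side for a single position against previous state (decoding reverses the shifts):

```go
package main

import "fmt"

// encode produces posV1's one-to-three-field record for the move
// from (prevFile, prevLine, prevCol) to (file, line, col).
func encode(prevFile string, prevLine, prevCol int64, file string, line, col int64) (ints []int64, files []string) {
	dCol := (col - prevCol) << 1
	dLine := (line - prevLine) << 1
	if file != prevFile {
		dLine |= 1 // flag: file name follows
	}
	if dLine != 0 {
		dCol |= 1 // flag: line delta follows
	}
	ints = append(ints, dCol)
	if dCol&1 != 0 {
		ints = append(ints, dLine)
		if dLine&1 != 0 {
			files = append(files, file)
		}
	}
	return
}

func main() {
	// Same file, same line, column moved by 3: a single-int record.
	ints, files := encode("p.go", 10, 4, "p.go", 10, 7)
	fmt.Println(ints, files) // [6] []

	// New file: all three fields are spelled out.
	ints, files = encode("p.go", 10, 4, "q.go", 1, 1)
	fmt.Println(ints, files) // [-5 -17] [q.go]
}
```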
+ w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(typeparams.NamedTypeOrigin(t), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *typeparams.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + if n > 0 { + w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects + } else { + w.setPkg(pkg, true) + } + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg + w.typ(f.Type(), pkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + ft := t.EmbeddedType(i) + tPkg := pkg + if named, _ := ft.(*types.Named); named != nil { + w.pos(named.Obj().Pos()) + } else { + w.pos(token.NoPos) + } + w.typ(ft, tPkg) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + case *typeparams.Union: + w.startType(unionType) + nt := t.Len() + w.uint64(uint64(nt)) + for i := 0; i < nt; i++ { + term := t.Term(i) + w.bool(term.Tilde()) + w.typ(term.Type(), pkg) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. 
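A string-level sketch of the mangling that `tparamExportName` and `tparamName` (next) implement: qualify the parameter with its owner, and disambiguate blank names by index so two `_` parameters stay distinct:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

const blankMarker = "$"

// exportName: type parameter T of func Min exports as "Min.T"; a
// blank parameter at index 1 exports as "Min.$1".
func exportName(prefix, name string, index int) string {
	if name == "_" {
		name = blankMarker + strconv.Itoa(index)
	}
	return prefix + "." + name
}

// importName strips the qualifier and reverts blank-name encoding.
func importName(exported string) string {
	name := exported[strings.LastIndex(exported, ".")+1:]
	if strings.HasPrefix(name, blankMarker) {
		return "_"
	}
	return name
}

func main() {
	fmt.Println(exportName("Min", "T", 0), importName("Min.T"))  // Min.T T
	fmt.Println(exportName("Min", "_", 1), importName("Min.$1")) // Min.$1 _
}
```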
+func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. + ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. 
+// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. 
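To make mpint's small/large boundary concrete, here is the unsigned half of the scheme pinned at maxBytes = 8 (i.e. uint64): values below 256-8 = 248 are one raw byte; anything larger gets a complement-encoded length prefix followed by big-endian bytes. A sketch only; the signed/zig-zag half is omitted:

```go
package main

import "fmt"

const maxBytes = 8
const maxSmall = 256 - maxBytes // 248

func encode(x uint64) []byte {
	if x < maxSmall {
		return []byte{byte(x)} // small value: a single byte
	}
	var b []byte
	for v := x; v > 0; v >>= 8 {
		b = append([]byte{byte(v)}, b...) // big-endian, no leading zeros
	}
	return append([]byte{byte(256 - len(b))}, b...) // complement length prefix
}

func decode(data []byte) uint64 {
	if data[0] < maxSmall {
		return uint64(data[0])
	}
	n := 256 - int(data[0]) // recover the length from its complement
	var x uint64
	for _, c := range data[1 : 1+n] {
		x = x<<8 | uint64(c)
	}
	return x
}

func main() {
	for _, x := range []uint64{0, 247, 248, 1000, 1 << 40} {
		e := encode(x)
		fmt.Printf("%d -> % x -> %d\n", x, e, decode(e))
	}
}
```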
+type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 000000000000..be6dace1534d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,998 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "sort" + "strings" + + "golang.org/x/tools/internal/typeparams" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. +const ( + iexportVersionGo1_11 = 0 + iexportVersionPosCol = 1 + iexportVersionGo1_18 = 2 + iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 +) + +type ident struct { + pkg *types.Package + name string +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType + typeParamType + instanceType + unionType +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. 
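objQueue's grow-on-full ring is easy to misread, so here is the same structure written generically with the unwrap-on-grow copy spelled out (a sketch for illustration, not the vendored code): head and tail only ever increase, and element i lives at ring[i%len(ring)].

```go
package main

import "fmt"

type queue[T any] struct {
	ring       []T
	head, tail int
}

func (q *queue[T]) empty() bool { return q.head == q.tail }

func (q *queue[T]) push(v T) {
	if len(q.ring) == 0 {
		q.ring = make([]T, 4)
	} else if q.tail-q.head == len(q.ring) {
		// Full. Since tail-head == len(ring), the live window wraps
		// exactly at head%len: copy its two halves into the front of
		// a ring twice the size, then rebase head to 0.
		i := q.head % len(q.ring)
		nring := make([]T, len(q.ring)*2)
		n := copy(nring, q.ring[i:])
		copy(nring[n:], q.ring[:i])
		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
	}
	q.ring[q.tail%len(q.ring)] = v
	q.tail++
}

func (q *queue[T]) pop() T {
	if q.empty() {
		panic("dequeue empty")
	}
	v := q.ring[q.head%len(q.ring)]
	q.head++
	return v
}

func main() {
	var q queue[int]
	for i := 0; i < 10; i++ {
		q.push(i) // forces growth from 4 to 8, then 8 to 16
	}
	for !q.empty() {
		fmt.Print(q.pop(), " ") // 0 1 2 3 4 5 6 7 8 9: FIFO order survives growth
	}
	fmt.Println()
}
```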
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) +} + +// A GetPackageFunc is a function that gets the package with the given path +// from the importer state, creating it (with the specified name) if necessary. +// It is an abstraction of the map historically used to memoize package creation. +// +// Two calls with the same path must return the same package. +// +// If the given getPackage func returns nil, the import will fail. +type GetPackageFunc = func(path, name string) *types.Package + +// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the +// given map of package path -> package. +// +// The resulting func may mutate m: if a requested package is not found, a new +// package will be inserted into m. +func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc { + return func(path, name string) *types.Package { + if _, ok := m[path]; !ok { + m[path] = types.NewPackage(path, name) + } + return m[path] + } +} + +func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + bundleVersion := r.uint64() + switch bundleVersion { + case bundleVersion: + default: + errorf("unknown bundle format version %d", bundleVersion) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if insert != nil { + // Shallow mode uses a different position encoding. + fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + insert: insert, + + stringData: stringData, + stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. 
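The `GetPackageFromMap` contract in miniature: repeated requests for one path return the identical `*types.Package`, and missing entries are created on first request and remembered in the caller's map. Reimplemented here for illustration:

```go
package main

import (
	"fmt"
	"go/types"
)

// getPackageFromMap mirrors GetPackageFromMap above: the returned
// func memoizes package creation in m, so two calls with the same
// path always yield the same pointer.
func getPackageFromMap(m map[string]*types.Package) func(path, name string) *types.Package {
	return func(path, name string) *types.Package {
		if _, ok := m[path]; !ok {
			m[path] = types.NewPackage(path, name)
		}
		return m[path]
	}
}

func main() {
	m := make(map[string]*types.Package)
	get := getPackageFromMap(m)
	a := get("example.com/a", "a")
	b := get("example.com/a", "a")
	fmt.Println(a == b, len(m)) // true 1
}
```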
+ tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := getPackage(pkgPath, pkgName) + if pkg == nil { + errorf("internal error: getPackage returned nil package for %s", pkgPath) + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + if i == 0 && !bundle { + p.localpkg = pkg + } + + p.pkgCache[pkgPathOff] = pkg + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode we don't expect an index for other packages. + assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + for ; nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the 'P' case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. 
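+	//
+	// For example (a hedged illustration, not taken from this patch): in
+	//
+	//	type Comparable[T any] interface{ CompareTo(T) int }
+	//	type SortedSet[T Comparable[T]] struct{ items []T }
+	//
+	// the bound Comparable[T] refers to T itself, so it can only be set
+	// once T has been fully created.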
+ for _, d := range p.later { + typeparams.SetTypeParamConstraint(d.t, d.constraint) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *typeparams.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + localpkg *types.Package + insert func(pkg *types.Package, name string) // "shallow" mode only + + stringData []byte + stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In "shallow" mode, call back to the application to + // find the object and insert it into the package scope. + if p.insert != nil { + assert(pkg != p.localpkg) + p.insert(pkg, name) // "can't fail" + return + } + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. + // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. 
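+		// For example (hedged illustration): if the sparse points set
+		// lines[1] = 10 and lines[4] = 40, the loop below fills the gap
+		// backwards as lines[3] = 39 and lines[2] = 38, keeping the
+		// whole sequence strictly increasing.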
+ for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := rhs.(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. + return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F', 'G': + var tparams []*typeparams.TypeParam + if tag == 'G' { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T', 'U': + // Types can be recursive. We need to setup a stub + // declaration before recursing. + obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). + r.declare(obj) + if tag == 'U' { + tparams := r.tparamList() + typeparams.SetForNamed(named, tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). 
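+				// For example (hedged illustration): for
+				//
+				//	type List[T any] struct{ items []T }
+				//	func (l List[T]) Len() int
+				//
+				// the receiver type List[T] carries one type
+				// argument, T, which becomes the method's
+				// single rparam.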
+ base := baseType(recv.Type()) + assert(base != nil) + targs := typeparams.NamedTypeArgs(base) + var rparams []*typeparams.TypeParam + if targs.Len() > 0 { + rparams = make([]*typeparams.TypeParam, targs.Len()) + for i := range rparams { + rparams[i] = targs.At(i).(*typeparams.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'P': + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := tparamName(name) + tn := types.NewTypeName(pos, r.currPkg, name0, nil) + t := typeparams.NewTypeParam(tn, nil) + + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + var implicit bool + if r.p.version >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := constraint.(*types.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + typeparams.MarkImplicit(iface) + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + if r.p.version >= iexportVersionGo1_18 { + // TODO: add support for using the kind. 
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.insert != nil { // shallow mode + return r.posv2() + } + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return 
r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %s)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv, nil, nil) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
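+		// (Hedged illustration: a use of List[int] is encoded as the
+		// base type List plus the type argument int; Instantiate below
+		// rebuilds List[int], and method lookup substitutes int for T
+		// on demand.)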
+ // TODO provide a non-nil *Environment + t, _ := typeparams.Instantiate(nil, baseType, targs, false) + return t + + case unionType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*typeparams.Term, r.uint64()) + for i := range terms { + terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + } + return typeparams.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*typeparams.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*typeparams.TypeParam, n) + for i := range xs { + // Note: the standard library importer is tolerant of nil types here, + // though would panic in SetTypeParams. + xs[i] = r.typ().(*typeparams.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} + +func baseType(typ types.Type) *types.Named { + // pointer receivers are never types.Named types + if p, _ := typ.(*types.Pointer); p != nil { + typ = p.Elem() + } + // receiver base types are always (possibly generic) types.Named types + n, _ := typ.(*types.Named) + return n +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go new file mode 100644 index 000000000000..8b163e3d058a --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go new file mode 100644 index 000000000000..49984f40fd80 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go new file mode 100644 index 000000000000..d892273efb61 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGo1_11 + +func additionalPredeclared() []types.Type { + return nil +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go new file mode 100644 index 000000000000..edbe6ea7041d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -0,0 +1,37 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go new file mode 100644 index 000000000000..286bf445483d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go new file mode 100644 index 000000000000..b5d69ffbe682 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go new file mode 100644 index 000000000000..8eb20729c2ad --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 000000000000..34fc783f82b9 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,719 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. 
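+// Deferred work includes the SetConstraint calls registered in
+// typeParamNames, whose bound types may still be under construction when
+// the type parameters are first created.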
+func (pr *pkgReader) later(fn func()) {
+	pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+	idx    pkgbits.Index
+	needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+	idx     pkgbits.Index
+	derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	s := string(data)
+	s = s[:strings.LastIndex(s, "\n$$\n")]
+	input := pkgbits.NewPkgDecoder(path, s)
+	pkg = readUnifiedPackage(fset, nil, imports, input)
+	return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+	if pr.laterFors == nil {
+		pr.laterFors = make(map[types.Type]int)
+	}
+	pr.laterFors[t] = len(pr.laterFns)
+	pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+	pr := pkgReader{
+		PkgDecoder: input,
+
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*fileInfo),
+		},
+
+		ctxt:    ctxt,
+		imports: imports,
+
+		posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+		pkgs:     make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+		typs:     make([]types.Type, input.NumElems(pkgbits.RelocType)),
+	}
+	defer pr.fake.setLines()
+
+	r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+	pkg := r.pkg()
+	r.Bool() // has init
+
+	for i, n := 0, r.Len(); i < n; i++ {
+		// As if r.obj(), but avoiding the Scope.Lookup call,
+		// to avoid eager loading of imports.
+		r.Sync(pkgbits.SyncObject)
+		assert(!r.Bool())
+		r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+		assert(r.Len() == 0)
+	}
+
+	r.Sync(pkgbits.SyncEOF)
+
+	for _, fn := range pr.laterFns {
+		fn()
+	}
+
+	for _, iface := range pr.ifaces {
+		iface.Complete()
+	}
+
+	// Imports() of pkg are all of the transitive packages that were loaded.
+	var imps []*types.Package
+	for _, imp := range pr.pkgs {
+		if imp != nil && imp != pkg {
+			imps = append(imps, imp)
+		}
+	}
+	sort.Sort(byPath(imps))
+	pkg.SetImports(imps)
+
+	pkg.MarkComplete()
+	return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+	pkgbits.Decoder
+
+	p *pkgReader
+
+	dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+	// bounds is a slice of typeInfos corresponding to the underlying
+	// bounds of the element's type parameters.
+	bounds []typeInfo
+
+	// tparams is a slice of the constructed TypeParams for the element.
+	tparams []*types.TypeParam
+
+	// derived is a slice of types derived from tparams, which may be
+	// instantiated while reading the current element.
+ derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
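+	// (A recursive doTyp call above may already have decoded and cached
+	// this same index; if so, prefer the previously recorded type.)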
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. + // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. 
+			rhs := r.typ()
+			if underlying := rhs.Underlying(); underlying != nil {
+				setUnderlying(underlying)
+			} else {
+				pk := r.p
+				pk.laterFor(named, func() {
+					// First be sure that the rhs is initialized, if it needs to be initialized.
+					delete(pk.laterFors, named) // prevent cycles
+					if i, ok := pk.laterFors[rhs]; ok {
+						f := pk.laterFns[i]
+						pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+						f()                        // initialize RHS
+					}
+					setUnderlying(rhs.Underlying())
+				})
+			}
+
+			for i, n := 0, r.Len(); i < n; i++ {
+				named.AddMethod(r.method())
+			}
+
+		case pkgbits.ObjVar:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewVar(pos, objPkg, objName, typ))
+		}
+	}
+
+	return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+	var dict readerDict
+
+	{
+		r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+		if implicits := r.Len(); implicits != 0 {
+			errorf("unexpected object with %v implicit type parameter(s)", implicits)
+		}
+
+		dict.bounds = make([]typeInfo, r.Len())
+		for i := range dict.bounds {
+			dict.bounds[i] = r.typInfo()
+		}
+
+		dict.derived = make([]derivedInfo, r.Len())
+		dict.derivedTypes = make([]types.Type, len(dict.derived))
+		for i := range dict.derived {
+			dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+		}
+
+		pr.retireReader(r)
+	}
+	// function references follow, but reader doesn't need those
+
+	return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+	r.Sync(pkgbits.SyncTypeParamNames)
+
+	// Note: This code assumes it only processes objects without
+	// implicit type parameters. This is currently fine, because
+	// reader is only used to read in exported declarations, which are
+	// always package scoped.
+
+	if len(r.dict.bounds) == 0 {
+		return nil
+	}
+
+	// Careful: Type parameter lists may have cycles. To allow for this,
+	// we construct the type parameter list in two passes: first we
+	// create all the TypeNames and TypeParams, then we construct and
+	// set the bound type.
+
+	r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+	for i := range r.dict.bounds {
+		pos := r.pos()
+		pkg, name := r.localIdent()
+
+		tname := types.NewTypeName(pos, pkg, name, nil)
+		r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+	}
+
+	typs := make([]types.Type, len(r.dict.bounds))
+	for i, bound := range r.dict.bounds {
+		typs[i] = r.p.typIdx(bound, r.dict)
+	}
+
+	// TODO(mdempsky): This is subtle, elaborate further.
+	//
+	// We have to save tparams outside of the closure, because
+	// typeParamNames() can be called multiple times with the same
+	// dictionary instance.
+	//
+	// Also, this needs to happen later to make sure SetUnderlying has
+	// been called.
+	//
+	// TODO(mdempsky): Is it safe to have a single "later" slice or do
+	// we need to have multiple passes? See comments on CL 386002 and
+	// go.dev/issue/52104.
+	tparams := r.dict.tparams
+	r.p.later(func() {
+		for i, typ := range typs {
+			tparams[i].SetConstraint(typ)
+		}
+	})
+
+	return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+	r.Sync(pkgbits.SyncMethod)
+	pos := r.pos()
+	pkg, name := r.selector()
+
+	rparams := r.typeParamNames()
+	sig := r.signature(r.param(), rparams, nil)
+
+	_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+	return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string)     { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string)       { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+	r.Sync(marker)
+	return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+	if pkg != nil {
+		return pkg.Scope()
+	}
+	return types.Universe
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 000000000000..d50551693f3d
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,356 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
+package gocommand
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	exec "golang.org/x/sys/execabs"
+
+	"golang.org/x/tools/internal/event"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+	// once guards the runner initialization.
+	once sync.Once
+
+	// inFlight tracks available workers.
+	inFlight chan struct{}
+
+	// serialized guards the ability to run a go command serially,
+	// to avoid deadlocks when claiming workers.
+	serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+	runner.once.Do(func() {
+		runner.inFlight = make(chan struct{}, maxInFlight)
+		runner.serialized = make(chan struct{}, 1)
+	})
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+	return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+	_, err := runner.runPiped(ctx, inv, stdout, stderr)
+	return err
+}
+
+// RunRaw runs the invocation, serializing requests only if they fight over
+// go.mod changes.
+func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+	// Make sure the runner is always initialized.
+	runner.initialize()
+
+	// First, try to run the go command concurrently.
+	stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
+
+	// If we encounter a load concurrency error, we need to retry serially.
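+	// (modConcurrencyError matches the messages quoted above, e.g.
+	// "go: updating go.mod: existing contents have changed since last read".)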
+ if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + ModFile string + + // If Overlay is set, the go command is invoked with -overlay=Overlay. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) + case "mod": + // mod needs the sub-verb before flags. 
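+		// For example (hedged): Verb "mod" with Args ["tidy"] and
+		// ModFile "tmp.mod" assembles "go mod tidy -modfile=tmp.mod".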
+ goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) + if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + + return runCmdContext(ctx, cmd) +} + +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(1 * time.Minute): + HandleHangingGoCommand(cmd.Process) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + } + + // Cancelled. Interrupt and see if it ends voluntarily. + cmd.Process.Signal(os.Interrupt) + select { + case err := <-resChan: + return err + case <-time.After(time.Second): + } + + // Didn't shut down in response to interrupt. Kill it hard. + // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT + // on certain platforms, such as unix. + if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { + // Don't panic here as this reliably fails on windows with EINVAL. + log.Printf("error killing the Go command: %v", err) + } + + // See above: don't wait indefinitely if we're debugging hanging Go commands. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill + HandleHangingGoCommand(cmd.Process) + } + } + return <-resChan +} + +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. 
+ +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 000000000000..2d3d408c0bed --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,109 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. 
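+	// For example (hedged): GOFLAGS="-mod=vendor -trimpath" causes
+	// modFlagRegexp below to capture "vendor", which forces vendoring on.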
+ inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/version.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 000000000000..307a76d474ad --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,81 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") + // Unset any unneeded flags, and remove them from BuildFlags, if they're + // present. + inv.ModFile = "" + inv.ModFlag = "" + var buildFlags []string + for _, flag := range inv.BuildFlags { + // Flags can be prefixed by one or two dashes. 
+ f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") + if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { + continue + } + buildFlags = append(buildFlags, flag) + } + inv.BuildFlags = buildFlags + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. +func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 000000000000..d9950b1f0bef --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import ( + "golang.org/x/tools/internal/gocommand" +) + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/codes.go new file mode 100644 index 000000000000..f0cabde96eba --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. 
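Editor's aside, not part of the vendored patch: `ParseGoVersionOutput` above anchors on the leading `go version ` prefix and accepts either a release string (`go1.N.N`) or a development build (`devel ...`). A minimal sketch with invented command outputs:

```go
package main

import (
	"fmt"
	"regexp"
)

// Mirrors the regexp used by ParseGoVersionOutput in version.go.
var versionRe = regexp.MustCompile(`^go version (go\S+|devel \S+)`)

func main() {
	for _, out := range []string{
		"go version go1.20.5 linux/amd64",
		"go version devel +abcdef Mon Jan 1", // invented devel build string
		"gibberish",
	} {
		if m := versionRe.FindStringSubmatch(out); len(m) == 2 {
			fmt.Printf("%q -> %q\n", out, m[1])
		} else {
			fmt.Printf("%q -> unrecognized\n", out)
		}
	}
}
```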
+// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 000000000000..b92e8e6eb329 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,517 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. 
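Editor's aside, not part of the vendored patch: the `elemEnds` bookkeeping described in the struct comments above stores only end offsets, so element I implicitly starts where element I-1 ends. A toy sketch of that indexing, with invented payload data:

```go
package main

import "fmt"

// A toy mirror of the PkgDecoder offset scheme: elemData is one
// contiguous payload, and elemEnds records where each element ends.
func main() {
	elemData := "aaabbbbcc" // three elements: "aaa", "bbbb", "cc"
	elemEnds := []uint32{3, 7, 9}

	for i := range elemEnds {
		var start uint32
		if i > 0 {
			start = elemEnds[i-1] // element i starts where element i-1 ends
		}
		end := elemEnds[i]
		fmt.Printf("element %d: %q\n", i, elemData[start:end])
	}
}
```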
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. + + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. 
+func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+	r := pr.TempDecoderRaw(k, idx)
+	r.Sync(marker)
+	return r
+}
+
+func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
+	pr.scratchRelocEnt = d.Relocs
+	d.Relocs = nil
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+	r.Sync(SyncRelocs)
+	r.Relocs = make([]RelocEnt, r.Len())
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	r.Data.Reset(pr.DataIdx(k, idx))
+	r.Sync(SyncRelocs)
+	l := r.Len()
+	if cap(pr.scratchRelocEnt) >= l {
+		r.Relocs = pr.scratchRelocEnt[:l]
+		pr.scratchRelocEnt = nil
+	} else {
+		r.Relocs = make([]RelocEnt, l)
+	}
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+	common *PkgDecoder
+
+	Relocs []RelocEnt
+	Data   strings.Reader
+
+	k   RelocKind
+	Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+	if err != nil {
+		errorf("unexpected decoding error: %w", err)
+	}
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+	x, err := readUvarint(&r.Data)
+	r.checkErr(err)
+	return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+	var x uint64
+	var s uint
+	for i := 0; i < binary.MaxVarintLen64; i++ {
+		b, err := r.ReadByte()
+		if err != nil {
+			if i > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return x, err
+		}
+		if b < 0x80 {
+			if i == binary.MaxVarintLen64-1 && b > 1 {
+				return x, overflow
+			}
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, io.SeekCurrent)
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns.
For example, use relative file + // paths if they would be shorter, or rewrite file paths to contain + // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how + // to reliably expand that again. + + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Uint64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. 
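Editor's aside, not part of the vendored patch: the Int64/Uint64 methods above ride on a zig-zag varint framing (rawVarint/rawUvarint, here and in the Encoder later in this diff), which maps small magnitudes of either sign to short encodings. A self-contained round-trip sketch of the zig-zag step:

```go
package main

import "fmt"

// zigzag maps signed integers onto unsigned ones so that small
// magnitudes, positive or negative, get short uvarint encodings.
func zigzag(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

// unzigzag is the inverse, matching Decoder.rawVarint above.
func unzigzag(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 63, -64} {
		ux := zigzag(x)
		fmt.Printf("%4d -> %3d -> %4d\n", x, ux, unzigzag(ux))
	}
}
```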
+func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/doc.go new file mode 100644 index 000000000000..c8a2796b5e4c --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. 
For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/encoder.go new file mode 100644 index 000000000000..6482617a4fcc --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -0,0 +1,383 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. 
+ copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. 
+ var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Uint encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. 
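Editor's aside, not part of the vendored patch: the Bool doc comment above describes an idiom where the writer both records and returns the branch taken, so encoder and decoder follow structurally identical code paths. A toy sketch of that symmetry (the `w`/`r` types are invented):

```go
package main

import (
	"bytes"
	"fmt"
)

type w struct{ buf *bytes.Buffer }
type r struct{ buf *bytes.Buffer }

// Bool records the branch taken and returns it, mirroring Encoder.Bool.
func (w w) Bool(b bool) bool {
	if b {
		w.buf.WriteByte(1)
	} else {
		w.buf.WriteByte(0)
	}
	return b
}

func (r r) Bool() bool {
	c, _ := r.buf.ReadByte()
	return c != 0
}

func main() {
	var buf bytes.Buffer
	enc, dec := w{&buf}, r{&buf}

	x := 7
	if enc.Bool(x != 0) {
		buf.WriteByte(byte(x)) // alternative #1: payload present
	}

	// The decoder takes the same branch the encoder recorded.
	if dec.Bool() {
		b, _ := buf.ReadByte()
		fmt.Println("decoded:", b)
	} else {
		fmt.Println("decoded: zero")
	}
}
```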
+func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/flags.go new file mode 100644 index 000000000000..654222745fac --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go new file mode 100644 index 000000000000..5294f6a63edd --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go new file mode 100644 index 000000000000..2324ae7adfe2 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. 
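Editor's aside, not part of the vendored patch: walkFrames below wraps the standard runtime.Callers / runtime.CallersFrames pairing. A minimal sketch of that pairing on its own:

```go
package main

import (
	"fmt"
	"runtime"
)

// callerFrames captures and formats the current call stack, the same
// pattern walkFrames wraps below.
func callerFrames() []string {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(2, pcs) // skip runtime.Callers and this function
	frames := runtime.CallersFrames(pcs[:n])

	var out []string
	for {
		frame, more := frames.Next()
		out = append(out, fmt.Sprintf("%s:%d %s", frame.File, frame.Line, frame.Function))
		if !more {
			break
		}
	}
	return out
}

func main() {
	for _, f := range callerFrames() {
		fmt.Println(f)
	}
}
```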
+func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/reloc.go new file mode 100644 index 000000000000..fcdfb97ca992 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/support.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/support.go new file mode 100644 index 000000000000..ad26d3b28cae --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/sync.go new file mode 100644 index 000000000000..5bd51ef71700 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). 
+ + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go new file mode 100644 index 000000000000..4a5b0ca5f2ff --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = 
"EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go new file mode 100644 index 000000000000..7e638ec24fcb --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -0,0 +1,151 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package tokeninternal provides access to some internal features of the token +// package. +package tokeninternal + +import ( + "fmt" + "go/token" + "sort" + "sync" + "unsafe" +) + +// GetLines returns the table of line-start offsets from a token.File. +func GetLines(file *token.File) []int { + // token.File has a Lines method on Go 1.21 and later. + if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { + return file.Lines() + } + + // This declaration must match that of token.File. + // This creates a risk of dependency skew. + // For now we check that the size of the two + // declarations is the same, on the (fragile) assumption + // that future changes would add fields. + type tokenFile119 struct { + _ string + _ int + _ int + mu sync.Mutex // we're not complete monsters + lines []int + _ []struct{} + } + type tokenFile118 struct { + _ *token.FileSet // deleted in go1.19 + tokenFile119 + } + + type uP = unsafe.Pointer + switch unsafe.Sizeof(*file) { + case unsafe.Sizeof(tokenFile118{}): + var ptr *tokenFile118 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + case unsafe.Sizeof(tokenFile119{}): + var ptr *tokenFile119 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + default: + panic("unexpected token.File size") + } +} + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. 
+ const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. + out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/common.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 000000000000..cfba8189f154 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,178 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that interact +// with generic Go code, as introduced with Go 1.18. +// +// Many of the types and functions in this package are proxies for the new APIs +// introduced in the standard library with Go 1.18. For example, the +// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec +// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go +// versions older than 1.18 these helpers are implemented as stubs, allowing +// users of this package to write code that handles generic constructs inline, +// even if the Go version being used to compile does not support generics. +// +// Additionally, this package contains common utilities for working with the +// new generic constructs, to supplement the standard library APIs. Notably, +// the StructuralTerms API computes a minimal representation of the structural +// restrictions on a type parameter. 
+// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter. +func IsTypeParam(t types.Type) bool { + _, ok := t.(*TypeParam) + return ok +} + +// OriginMethod returns the origin method associated with the method fn. +// For methods on a non-generic receiver base type, this is just +// fn. However, for methods with a generic receiver, OriginMethod returns the +// corresponding method in the method set of the origin type. +// +// As a special case, if fn is not a method (has no receiver), OriginMethod +// returns fn. +func OriginMethod(fn *types.Func) *types.Func { + recv := fn.Type().(*types.Signature).Recv() + if recv == nil { + return fn + } + base := recv.Type() + p, isPtr := base.(*types.Pointer) + if isPtr { + base = p.Elem() + } + named, isNamed := base.(*types.Named) + if !isNamed { + // Receiver is a *types.Interface. + return fn + } + if ForNamed(named).Len() == 0 { + // Receiver base has no type parameters, so we can avoid the lookup below. + return fn + } + orig := NamedTypeOrigin(named) + gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + return gfn.(*types.Func) +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. 
+func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. + + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := ForNamed(VN) + ttparams := ForNamed(TN) + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 000000000000..993135ec90e8 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := _NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." 
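Editor's aside, not part of the vendored patch: the direction-merging loop that follows implements the spec rule quoted above. As a hedged illustration, a constraint mixing a bidirectional and a receive-only channel of the same element type has core type `<-chan int`, so receive operations on the type parameter compile:

```go
package main

import "fmt"

// mixed mixes a bidirectional and a receive-only channel with the same
// element type, so per the rule quoted above its core type is <-chan int.
type mixed interface {
	chan int | <-chan int
}

// recv compiles because the core type of mixed permits receives.
func recv[C mixed](c C) int { return <-c }

func main() {
	ch := make(chan int, 1)
	ch <- 42
	fmt.Println(recv(ch))
}
```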
+ for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// _NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// _NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, _NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, _NormalTerms returns ErrEmptyTypeSet. +// +// _NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func _NormalTerms(typ types.Type) ([]*Term, error) { + switch typ := typ.(type) { + case *TypeParam: + return StructuralTerms(typ) + case *Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*Term{NewTerm(false, typ)}, nil + } +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go new file mode 100644 index 000000000000..18212390e192 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go @@ -0,0 +1,12 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.18 +// +build !go1.18 + +package typeparams + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go new file mode 100644 index 000000000000..d67148823c4d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go @@ -0,0 +1,15 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typeparams + +// Note: this constant is in a separate file as this is the only acceptable +// diff between the <1.18 API of this package and the 1.18 API. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = true diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 000000000000..9c631b6512de --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. 
In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *TypeParam) ([]*Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *Union) ([]*Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*Term + for _, term := range tset.terms { + terms = append(terms, NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. 
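+		// For example, interface{ ~string|~int; ~string|~[]byte } has the term
+		// set {~string}: the intersection of the two embedded unions.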
+		tset.terms = allTermlist
+		for i := 0; i < u.NumEmbeddeds(); i++ {
+			embedded := u.EmbeddedType(i)
+			if _, ok := embedded.Underlying().(*TypeParam); ok {
+				return nil, fmt.Errorf("invalid embedded type %T", embedded)
+			}
+			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+			if err != nil {
+				return nil, err
+			}
+			tset.terms = tset.terms.intersect(tset2.terms)
+		}
+	case *Union:
+		// The term set of a union is the union of term sets of its terms.
+		tset.terms = nil
+		for i := 0; i < u.Len(); i++ {
+			t := u.Term(i)
+			var terms termlist
+			switch t.Type().Underlying().(type) {
+			case *types.Interface:
+				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+				if err != nil {
+					return nil, err
+				}
+				terms = tset2.terms
+			case *TypeParam, *Union:
+				// A stand-alone type parameter or union is not permitted as union
+				// term.
+				return nil, fmt.Errorf("invalid union term %T", t)
+			default:
+				if t.Type() == types.Typ[types.Invalid] {
+					continue
+				}
+				terms = termlist{{t.Tilde(), t.Type()}}
+			}
+			tset.terms = tset.terms.union(terms)
+			if len(tset.terms) > maxTermCount {
+				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+			}
+		}
+	case *TypeParam:
+		panic("unreachable")
+	default:
+		// For all other types, the term set is just a single non-tilde term
+		// holding the type itself.
+		if u != types.Typ[types.Invalid] {
+			tset.terms = termlist{{false, t}}
+		}
+	}
+	return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 000000000000..933106a23dd4
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"bytes"
+	"go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf bytes.Buffer
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(" ∪ ")
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
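+	// (A 𝓤 term is a non-nil *term whose typ field is nil; see allTermlist.)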
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+		}
+	}
+	return true
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
new file mode 100644
index 000000000000..b4788978ff4b
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
@@ -0,0 +1,197 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+)
+
+func unsupported() {
+	panic("type parameters are unsupported at this go version")
+}
+
+// IndexListExpr is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type IndexListExpr struct {
+	ast.Expr
+	X       ast.Expr   // expression
+	Lbrack  token.Pos  // position of "["
+	Indices []ast.Expr // index expressions
+	Rbrack  token.Pos  // position of "]"
+}
+
+// ForTypeSpec returns an empty field list, as type parameters are not
+// supported at this Go version.
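+// (The TypeParams field was added to ast.TypeSpec only in Go 1.18.)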
+func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncType returns an empty field list, as type parameters are not +// supported at this Go version. +func ForFuncType(*ast.FuncType) *ast.FieldList { + return nil +} + +// TypeParam is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. +type TypeParam struct{ types.Type } + +func (*TypeParam) Index() int { unsupported(); return 0 } +func (*TypeParam) Constraint() types.Type { unsupported(); return nil } +func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } + +// TypeParamList is a placeholder for an empty type parameter list. +type TypeParamList struct{} + +func (*TypeParamList) Len() int { return 0 } +func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } + +// TypeList is a placeholder for an empty type list. +type TypeList struct{} + +func (*TypeList) Len() int { return 0 } +func (*TypeList) At(int) types.Type { unsupported(); return nil } + +// NewTypeParam is unsupported at this Go version, and panics. +func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { + unsupported() + return nil +} + +// SetTypeParamConstraint is unsupported at this Go version, and panics. +func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { + unsupported() +} + +// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or +// typeParams is non-empty. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + if len(recvTypeParams) != 0 || len(typeParams) != 0 { + panic("signatures cannot have type parameters at this Go version") + } + return types.NewSignature(recv, params, results, variadic) +} + +// ForSignature returns an empty slice. +func ForSignature(*types.Signature) *TypeParamList { + return nil +} + +// RecvTypeParams returns a nil slice. +func RecvTypeParams(sig *types.Signature) *TypeParamList { + return nil +} + +// IsComparable returns false, as no interfaces are type-restricted at this Go +// version. +func IsComparable(*types.Interface) bool { + return false +} + +// IsMethodSet returns true, as no interfaces are type-restricted at this Go +// version. +func IsMethodSet(*types.Interface) bool { + return true +} + +// IsImplicit returns false, as no interfaces are implicit at this Go version. +func IsImplicit(*types.Interface) bool { + return false +} + +// MarkImplicit does nothing, because this Go version does not have implicit +// interfaces. +func MarkImplicit(*types.Interface) {} + +// ForNamed returns an empty type parameter list, as type parameters are not +// supported at this Go version. +func ForNamed(*types.Named) *TypeParamList { + return nil +} + +// SetForNamed panics if tparams is non-empty. +func SetForNamed(_ *types.Named, tparams []*TypeParam) { + if len(tparams) > 0 { + unsupported() + } +} + +// NamedTypeArgs returns nil. +func NamedTypeArgs(*types.Named) *TypeList { + return nil +} + +// NamedTypeOrigin is the identity method at this Go version. +func NamedTypeOrigin(named *types.Named) types.Type { + return named +} + +// Term holds information about a structural type restriction. 
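+// For example, NewTerm(true, types.Typ[types.String]) below represents the
+// term ~string.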
+type Term struct {
+	tilde bool
+	typ   types.Type
+}
+
+func (m *Term) Tilde() bool      { return m.tilde }
+func (m *Term) Type() types.Type { return m.typ }
+func (m *Term) String() string {
+	pre := ""
+	if m.tilde {
+		pre = "~"
+	}
+	return pre + m.typ.String()
+}
+
+// NewTerm returns a new placeholder Term.
+func NewTerm(tilde bool, typ types.Type) *Term {
+	return &Term{tilde, typ}
+}
+
+// Union is a placeholder type, as type parameters are not supported at this Go
+// version. Its methods panic on use.
+type Union struct{ types.Type }
+
+func (*Union) Len() int         { return 0 }
+func (*Union) Term(i int) *Term { unsupported(); return nil }
+
+// NewUnion is unsupported at this Go version, and panics.
+func NewUnion(terms []*Term) *Union {
+	unsupported()
+	return nil
+}
+
+// InitInstanceInfo is a noop at this Go version.
+func InitInstanceInfo(*types.Info) {}
+
+// Instance is a placeholder type, as type parameters are not supported at this
+// Go version.
+type Instance struct {
+	TypeArgs *TypeList
+	Type     types.Type
+}
+
+// GetInstances returns a nil map, as type parameters are not supported at this
+// Go version.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
+
+// Context is a placeholder type, as type parameters are not supported at
+// this Go version.
+type Context struct{}
+
+// NewContext returns a placeholder Context instance.
+func NewContext() *Context {
+	return &Context{}
+}
+
+// Instantiate is unsupported on this Go version, and panics.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+	unsupported()
+	return nil, nil
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
new file mode 100644
index 000000000000..114a36b866bc
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
@@ -0,0 +1,151 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// IndexListExpr is an alias for ast.IndexListExpr.
+type IndexListExpr = ast.IndexListExpr
+
+// ForTypeSpec returns n.TypeParams.
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
+	if n == nil {
+		return nil
+	}
+	return n.TypeParams
+}
+
+// ForFuncType returns n.TypeParams.
+func ForFuncType(n *ast.FuncType) *ast.FieldList {
+	if n == nil {
+		return nil
+	}
+	return n.TypeParams
+}
+
+// TypeParam is an alias for types.TypeParam
+type TypeParam = types.TypeParam
+
+// TypeParamList is an alias for types.TypeParamList
+type TypeParamList = types.TypeParamList
+
+// TypeList is an alias for types.TypeList
+type TypeList = types.TypeList
+
+// NewTypeParam calls types.NewTypeParam.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+	return types.NewTypeParam(name, constraint)
+}
+
+// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+	tparam.SetConstraint(constraint)
+}
+
+// NewSignatureType calls types.NewSignatureType.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+	return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
+}
+
+// ForSignature returns sig.TypeParams().
+func ForSignature(sig *types.Signature) *TypeParamList {
+	return sig.TypeParams()
+}
+
+// RecvTypeParams returns sig.RecvTypeParams().
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+	return sig.RecvTypeParams()
+}
+
+// IsComparable calls iface.IsComparable().
+func IsComparable(iface *types.Interface) bool {
+	return iface.IsComparable()
+}
+
+// IsMethodSet calls iface.IsMethodSet().
+func IsMethodSet(iface *types.Interface) bool {
+	return iface.IsMethodSet()
+}
+
+// IsImplicit calls iface.IsImplicit().
+func IsImplicit(iface *types.Interface) bool {
+	return iface.IsImplicit()
+}
+
+// MarkImplicit calls iface.MarkImplicit().
+func MarkImplicit(iface *types.Interface) {
+	iface.MarkImplicit()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) *TypeParamList {
+	return named.TypeParams()
+}
+
+// SetForNamed sets the type params tparams on n. Each tparam must be of
+// dynamic type *types.TypeParam.
+func SetForNamed(n *types.Named, tparams []*TypeParam) {
+	n.SetTypeParams(tparams)
+}
+
+// NamedTypeArgs returns named.TypeArgs().
+func NamedTypeArgs(named *types.Named) *TypeList {
+	return named.TypeArgs()
+}
+
+// NamedTypeOrigin returns named.Origin().
+func NamedTypeOrigin(named *types.Named) types.Type {
+	return named.Origin()
+}
+
+// Term is an alias for types.Term.
+type Term = types.Term
+
+// NewTerm calls types.NewTerm.
+func NewTerm(tilde bool, typ types.Type) *Term {
+	return types.NewTerm(tilde, typ)
+}
+
+// Union is an alias for types.Union
+type Union = types.Union
+
+// NewUnion calls types.NewUnion.
+func NewUnion(terms []*Term) *Union {
+	return types.NewUnion(terms)
+}
+
+// InitInstanceInfo initializes info to record information about type and
+// function instances.
+func InitInstanceInfo(info *types.Info) {
+	info.Instances = make(map[*ast.Ident]types.Instance)
+}
+
+// Instance is an alias for types.Instance.
+type Instance = types.Instance
+
+// GetInstances returns info.Instances.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance {
+	return info.Instances
+}
+
+// Context is an alias for types.Context.
+type Context = types.Context
+
+// NewContext calls types.NewContext.
+func NewContext() *Context {
+	return types.NewContext()
+}
+
+// Instantiate calls types.Instantiate.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+	return types.Instantiate(ctxt, typ, targs, validate)
+}
diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 000000000000..7ddee28d9875
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,170 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//	 T:  &term{false, T}  == {T}                    // set of type T
+//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
+//
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪  T == ~t
+	//  T ∪ ~t == ~t
+	//  T ∪  T ==  T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩  T ==  T
+	//  T ∩ ~t ==  T
+	//  T ∩  T ==  T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return false // t ∈ ∅ == false
+	case x.typ == nil:
+		return true // t ∈ 𝓤 == true
+	}
+	// ∅ ⊂ x ⊂ 𝓤
+
+	u := t
+	if x.tilde {
+		u = under(u)
+	}
+	return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return true // ∅ ⊆ y == true
+	case y == nil:
+		return false // x ⊆ ∅ == false since x != ∅
+	case y.typ == nil:
+		return true // x ⊆ 𝓤 == true
+	case x.typ == nil:
+		return false // 𝓤 ⊆ y == false since y != 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return false // x ⊆ y == false if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ⊆ ~t == true
+	// ~t ⊆  T == false
+	//  T ⊆ ~t == true
+	//  T ⊆  T == true
+	return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
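+// For example, int and ~int are not disjoint (int ∈ ~int), while int and
+// string are.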
+func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 000000000000..07484073a57d --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1560 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. 
+	//
+	// Example:
+	//  import "fmt"
+	//
+	//  func main() {}
+	UnusedImport
+
+	/* initialization */
+
+	// InvalidInitCycle occurs when an invalid cycle is detected within the
+	// initialization graph.
+	//
+	// Example:
+	//  var x int = f()
+	//
+	//  func f() int { return x }
+	InvalidInitCycle
+
+	/* decls */
+
+	// DuplicateDecl occurs when an identifier is declared multiple times.
+	//
+	// Example:
+	//  var x = 1
+	//  var x = 2
+	DuplicateDecl
+
+	// InvalidDeclCycle occurs when a declaration cycle is not valid.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  type T struct {
+	//  	a [n]int
+	//  }
+	//
+	//  var n = unsafe.Sizeof(T{})
+	InvalidDeclCycle
+
+	// InvalidTypeCycle occurs when a cycle in type definitions results in a
+	// type that is not well-defined.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  type T [unsafe.Sizeof(T{})]int
+	InvalidTypeCycle
+
+	/* decls > const */
+
+	// InvalidConstInit occurs when a const declaration has a non-constant
+	// initializer.
+	//
+	// Example:
+	//  var x int
+	//  const _ = x
+	InvalidConstInit
+
+	// InvalidConstVal occurs when a const value cannot be converted to its
+	// target type.
+	//
+	// TODO(findleyr): this error code and example are not very clear. Consider
+	// removing it.
+	//
+	// Example:
+	//  const _ = 1 << "hello"
+	InvalidConstVal
+
+	// InvalidConstType occurs when the underlying type in a const declaration
+	// is not a valid constant type.
+	//
+	// Example:
+	//  const c *int = 4
+	InvalidConstType
+
+	/* decls > var (+ other variable assignment codes) */
+
+	// UntypedNilUse occurs when the predeclared (untyped) value nil is used to
+	// initialize a variable declared without an explicit type.
+	//
+	// Example:
+	//  var x = nil
+	UntypedNilUse
+
+	// WrongAssignCount occurs when the number of values on the right-hand side
+	// of an assignment or initialization expression does not match the number
+	// of variables on the left-hand side.
+	//
+	// Example:
+	//  var x = 1, 2
+	WrongAssignCount
+
+	// UnassignableOperand occurs when the left-hand side of an assignment is
+	// not assignable.
+	//
+	// Example:
+	//  func f() {
+	//  	const c = 1
+	//  	c = 2
+	//  }
+	UnassignableOperand
+
+	// NoNewVar occurs when a short variable declaration (':=') does not declare
+	// new variables.
+	//
+	// Example:
+	//  func f() {
+	//  	x := 1
+	//  	x := 2
+	//  }
+	NoNewVar
+
+	// MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
+	// not have single-valued left-hand or right-hand side.
+	//
+	// Per the spec:
+	//  "In assignment operations, both the left- and right-hand expression lists
+	//  must contain exactly one single-valued expression"
+	//
+	// Example:
+	//  func f() int {
+	//  	x, y := 1, 2
+	//  	x, y += 1
+	//  	return x + y
+	//  }
+	MultiValAssignOp
+
+	// InvalidIfaceAssign occurs when a value of type T is used as an
+	// interface, but T does not implement a method of the expected interface.
+	//
+	// Example:
+	//  type I interface {
+	//  	f()
+	//  }
+	//
+	//  type T int
+	//
+	//  var x I = T(1)
+	InvalidIfaceAssign
+
+	// InvalidChanAssign occurs when a chan assignment is invalid.
+	//
+	// Per the spec, a value x is assignable to a channel type T if:
+	//  "x is a bidirectional channel value, T is a channel type, x's type V and
+	//  T have identical element types, and at least one of V or T is not a
+	//  defined type."
+	//
+	// Example:
+	//  type T1 chan int
+	//  type T2 chan int
+	//
+	//  var x T1
+	//  // Invalid assignment because both types are named
+	//  var _ T2 = x
	InvalidChanAssign
+
+	// IncompatibleAssign occurs when the type of the right-hand side expression
+	// in an assignment cannot be assigned to the type of the variable being
+	// assigned.
+	//
+	// Example:
+	//  var x []int
+	//  var _ int = x
+	IncompatibleAssign
+
+	// UnaddressableFieldAssign occurs when trying to assign to a struct field
+	// in a map value.
+	//
+	// Example:
+	//  func f() {
+	//  	m := make(map[string]struct{i int})
+	//  	m["foo"].i = 42
+	//  }
+	UnaddressableFieldAssign
+
+	/* decls > type (+ other type expression codes) */
+
+	// NotAType occurs when the identifier used as the underlying type in a type
+	// declaration or the right-hand side of a type alias does not denote a type.
+	//
+	// Example:
+	//  var S = 2
+	//
+	//  type T S
+	NotAType
+
+	// InvalidArrayLen occurs when an array length is not a constant value.
+	//
+	// Example:
+	//  var n = 3
+	//  var _ = [n]int{}
+	InvalidArrayLen
+
+	// BlankIfaceMethod occurs when a method name is '_'.
+	//
+	// Per the spec:
+	//  "The name of each explicitly specified method must be unique and not
+	//  blank."
+	//
+	// Example:
+	//  type T interface {
+	//  	_(int)
+	//  }
+	BlankIfaceMethod
+
+	// IncomparableMapKey occurs when a map key type does not support the == and
+	// != operators.
+	//
+	// Per the spec:
+	//  "The comparison operators == and != must be fully defined for operands of
+	//  the key type; thus the key type must not be a function, map, or slice."
+	//
+	// Example:
+	//  var x map[T]int
+	//
+	//  type T []int
+	IncomparableMapKey
+
+	// InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+	// interface.
+	//
+	// Example:
+	//  type T struct {}
+	//
+	//  func (T) m()
+	//
+	//  type I interface {
+	//  	T
+	//  }
+	InvalidIfaceEmbed
+
+	// InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+	// and T is itself a pointer, an unsafe.Pointer, or an interface.
+	//
+	// Per the spec:
+	//  "An embedded field must be specified as a type name T or as a pointer to
+	//  a non-interface type name *T, and T itself may not be a pointer type."
+	//
+	// Example:
+	//  type T *int
+	//
+	//  type S struct {
+	//  	*T
+	//  }
+	InvalidPtrEmbed
+
+	/* decls > func and method */
+
+	// BadRecv occurs when a method declaration does not have exactly one
+	// receiver parameter.
+	//
+	// Example:
+	//  func () _() {}
+	BadRecv
+
+	// InvalidRecv occurs when a receiver type expression is not of the form T
+	// or *T, or T is a pointer type.
+	//
+	// Example:
+	//  type T struct {}
+	//
+	//  func (**T) m() {}
+	InvalidRecv
+
+	// DuplicateFieldAndMethod occurs when an identifier appears as both a field
+	// and method name.
+	//
+	// Example:
+	//  type T struct {
+	//  	m int
+	//  }
+	//
+	//  func (T) m() {}
+	DuplicateFieldAndMethod
+
+	// DuplicateMethod occurs when two methods on the same receiver type have
+	// the same name.
+	//
+	// Example:
+	//  type T struct {}
+	//  func (T) m() {}
+	//  func (T) m(i int) int { return i }
+	DuplicateMethod
+
+	/* decls > special */
+
+	// InvalidBlank occurs when a blank identifier is used as a value or type.
+	//
+	// Per the spec:
+	//  "The blank identifier may appear as an operand only on the left-hand side
+	//  of an assignment."
+	//
+	// Example:
+	//  var x = _
+	InvalidBlank
+
+	// InvalidIota occurs when the predeclared identifier iota is used outside
+	// of a constant declaration.
+ // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. 
+ // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. 
+	//
+	// Example:
+	//  type E1 struct { i int }
+	//  type E2 struct { i int }
+	//  type T struct { E1; E2 }
+	//
+	//  var x T
+	//  var _ = x.i
+	AmbiguousSelector
+
+	// UndeclaredImportedName occurs when a package-qualified identifier is
+	// undeclared by the imported package.
+	//
+	// Example:
+	//  import "go/types"
+	//
+	//  var _ = types.NotAnActualIdentifier
+	UndeclaredImportedName
+
+	// UnexportedName occurs when a selector refers to an unexported identifier
+	// of an imported package.
+	//
+	// Example:
+	//  import "reflect"
+	//
+	//  type _ reflect.flag
+	UnexportedName
+
+	// UndeclaredName occurs when an identifier is not declared in the current
+	// scope.
+	//
+	// Example:
+	//  var x T
+	UndeclaredName
+
+	// MissingFieldOrMethod occurs when a selector references a field or method
+	// that does not exist.
+	//
+	// Example:
+	//  type T struct {}
+	//
+	//  var x = T{}.f
+	MissingFieldOrMethod
+
+	/* exprs > ... */
+
+	// BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
+	// not valid.
+	//
+	// Example:
+	//  var _ = map[int][...]int{0: {}}
+	BadDotDotDotSyntax
+
+	// NonVariadicDotDotDot occurs when a "..." is used on the final argument to
+	// a non-variadic function.
+	//
+	// Example:
+	//  func printArgs(s []string) {
+	//  	for _, a := range s {
+	//  		println(a)
+	//  	}
+	//  }
+	//
+	//  func f() {
+	//  	s := []string{"a", "b", "c"}
+	//  	printArgs(s...)
+	//  }
+	NonVariadicDotDotDot
+
+	// MisplacedDotDotDot occurs when a "..." is used somewhere other than the
+	// final argument to a function call.
+	//
+	// Example:
+	//  func printArgs(args ...int) {
+	//  	for _, a := range args {
+	//  		println(a)
+	//  	}
+	//  }
+	//
+	//  func f() {
+	//  	a := []int{1,2,3}
+	//  	printArgs(0, a...)
+	//  }
+	MisplacedDotDotDot
+
+	// InvalidDotDotDotOperand occurs when a "..." operator is applied to a
+	// single-valued operand.
+	//
+	// Example:
+	//  func printArgs(args ...int) {
+	//  	for _, a := range args {
+	//  		println(a)
+	//  	}
+	//  }
+	//
+	//  func f() {
+	//  	a := 1
+	//  	printArgs(a...)
+	//  }
+	//
+	// Example:
+	//  func args() (int, int) {
+	//  	return 1, 2
+	//  }
+	//
+	//  func printArgs(args ...int) {
+	//  	for _, a := range args {
+	//  		println(a)
+	//  	}
+	//  }
+	//
+	//  func g() {
+	//  	printArgs(args()...)
+	//  }
+	InvalidDotDotDotOperand
+
+	// InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
+	// function.
+	//
+	// Example:
+	//  var s = []int{1, 2, 3}
+	//  var l = len(s...)
+	InvalidDotDotDot
+
+	/* exprs > built-in */
+
+	// UncalledBuiltin occurs when a built-in function is used as a
+	// function-valued expression, instead of being called.
+	//
+	// Per the spec:
+	//  "The built-in functions do not have standard Go types, so they can only
+	//  appear in call expressions; they cannot be used as function values."
+	//
+	// Example:
+	//  var _ = copy
+	UncalledBuiltin
+
+	// InvalidAppend occurs when append is called with a first argument that is
+	// not a slice.
+	//
+	// Example:
+	//  var _ = append(1, 2)
+	InvalidAppend
+
+	// InvalidCap occurs when an argument to the cap built-in function is not of
+	// supported type.
+	//
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
+	// which underlying types are supported as arguments to cap and len.
+	//
+	// Example:
+	//  var s = 2
+	//  var x = cap(s)
+	InvalidCap
+
+	// InvalidClose occurs when close(...) is called with an argument that is
+	// not of channel type, or that is a receive-only channel.
+	//
+	// Example:
+	//  func f() {
+	//  	var x int
+	//  	close(x)
+	//  }
+	InvalidClose
+
+	// InvalidCopy occurs when the arguments are not of slice type or do not
+	// have compatible type.
+	//
+	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
+	// information on the type requirements for the copy built-in.
+	//
+	// Example:
+	//  func f() {
+	//  	var x []int
+	//  	y := []int64{1,2,3}
+	//  	copy(x, y)
+	//  }
+	InvalidCopy
+
+	// InvalidComplex occurs when the complex built-in function is called with
+	// arguments with incompatible types.
+	//
+	// Example:
+	//  var _ = complex(float32(1), float64(2))
+	InvalidComplex
+
+	// InvalidDelete occurs when the delete built-in function is called with a
+	// first argument that is not a map.
+	//
+	// Example:
+	//  func f() {
+	//  	m := "hello"
+	//  	delete(m, "e")
+	//  }
+	InvalidDelete
+
+	// InvalidImag occurs when the imag built-in function is called with an
+	// argument that does not have complex type.
+	//
+	// Example:
+	//  var _ = imag(int(1))
+	InvalidImag
+
+	// InvalidLen occurs when an argument to the len built-in function is not of
+	// supported type.
+	//
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
+	// which underlying types are supported as arguments to cap and len.
+	//
+	// Example:
+	//  var s = 2
+	//  var x = len(s)
+	InvalidLen
+
+	// SwappedMakeArgs occurs when make is called with three arguments, and its
+	// length argument is larger than its capacity argument.
+	//
+	// Example:
+	//  var x = make([]int, 3, 2)
+	SwappedMakeArgs
+
+	// InvalidMake occurs when make is called with an unsupported type argument.
+	//
+	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+	// information on the types that may be created using make.
+	//
+	// Example:
+	//  var x = make(int)
+	InvalidMake
+
+	// InvalidReal occurs when the real built-in function is called with an
+	// argument that does not have complex type.
+	//
+	// Example:
+	//  var _ = real(int(1))
+	InvalidReal
+
+	/* exprs > assertion */
+
+	// InvalidAssert occurs when a type assertion is applied to a
+	// value that is not of interface type.
+	//
+	// Example:
+	//  var x = 1
+	//  var _ = x.(float64)
+	InvalidAssert
+
+	// ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+	// interface cannot have dynamic type T, due to a missing or mismatching
+	// method on T.
+	//
+	// Example:
+	//  type T int
+	//
+	//  func (t *T) m() int { return int(*t) }
+	//
+	//  type I interface { m() int }
+	//
+	//  var x I
+	//  var _ = x.(T)
+	ImpossibleAssert
+
+	/* exprs > conversion */
+
+	// InvalidConversion occurs when the argument type cannot be converted to the
+	// target.
+	//
+	// See https://golang.org/ref/spec#Conversions for the rules of
+	// convertibility.
+	//
+	// Example:
+	//  var x float64
+	//  var _ = string(x)
+	InvalidConversion
+
+	// InvalidUntypedConversion occurs when there is no valid implicit
+	// conversion from an untyped value satisfying the type constraints of the
+	// context in which it is used.
+	//
+	// Example:
+	//  var _ = 1 + ""
+	InvalidUntypedConversion
+
+	/* offsetof */
+
+	// BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
+	// that is not a selector expression.
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  var x int
+	//  var _ = unsafe.Offsetof(x)
+	BadOffsetofSyntax
+
+	// InvalidOffsetof occurs when unsafe.Offsetof is called with a method
+	// selector, rather than a field selector, or when the field is embedded via
+	// a pointer.
+	//
+	// Per the spec:
+	//
+	//  "If f is an embedded field, it must be reachable without pointer
+	//  indirections through fields of the struct."
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  type T struct { f int }
+	//  type S struct { *T }
+	//  var s S
+	//  var _ = unsafe.Offsetof(s.f)
+	//
+	// Example:
+	//  import "unsafe"
+	//
+	//  type S struct{}
+	//
+	//  func (S) m() {}
+	//
+	//  var s S
+	//  var _ = unsafe.Offsetof(s.m)
+	InvalidOffsetof
+
+	/* control flow > scope */
+
+	// UnusedExpr occurs when a side-effect free expression is used as a
+	// statement. Such a statement has no effect.
+	//
+	// Example:
+	//  func f(i int) {
+	//  	i*i
+	//  }
+	UnusedExpr
+
+	// UnusedVar occurs when a variable is declared but unused.
+	//
+	// Example:
+	//  func f() {
+	//  	x := 1
+	//  }
+	UnusedVar
+
+	// MissingReturn occurs when a function with results is missing a return
+	// statement.
+	//
+	// Example:
+	//  func f() int {}
+	MissingReturn
+
+	// WrongResultCount occurs when a return statement returns an incorrect
+	// number of values.
+	//
+	// Example:
+	//  func ReturnOne() int {
+	//  	return 1, 2
+	//  }
+	WrongResultCount
+
+	// OutOfScopeResult occurs when the name of a value implicitly returned by
+	// an empty return statement is shadowed in a nested scope.
+	//
+	// Example:
+	//  func factor(n int) (i int) {
+	//  	for i := 2; i < n; i++ {
+	//  		if n%i == 0 {
+	//  			return
+	//  		}
+	//  	}
+	//  	return 0
+	//  }
+	OutOfScopeResult
+
+	/* control flow > if */
+
+	// InvalidCond occurs when an if condition is not a boolean expression.
+	//
+	// Example:
+	//  func checkReturn(i int) {
+	//  	if i {
+	//  		panic("non-zero return")
+	//  	}
+	//  }
+	InvalidCond
+
+	/* control flow > for */
+
+	// InvalidPostDecl occurs when there is a declaration in a for-loop post
+	// statement.
+	//
+	// Example:
+	//  func f() {
+	//  	for i := 0; i < 10; j := 0 {}
+	//  }
+	InvalidPostDecl
+
+	// InvalidChanRange occurs when a send-only channel is used in a range
+	// expression.
+	//
+	// Example:
+	//  func sum(c chan<- int) {
+	//  	s := 0
+	//  	for i := range c {
+	//  		s += i
+	//  	}
+	//  }
+	InvalidChanRange
+
+	// InvalidIterVar occurs when two iteration variables are used while ranging
+	// over a channel.
+	//
+	// Example:
+	//  func f(c chan int) {
+	//  	for k, v := range c {
+	//  		println(k, v)
+	//  	}
+	//  }
+	InvalidIterVar
+
+	// InvalidRangeExpr occurs when the type of a range expression is not array,
+	// slice, string, map, or channel.
+	//
+	// Example:
+	//  func f(i int) {
+	//  	for j := range i {
+	//  		println(j)
+	//  	}
+	//  }
+	InvalidRangeExpr
+
+	/* control flow > switch */
+
+	// MisplacedBreak occurs when a break statement is not within a for, switch,
+	// or select statement of the innermost function definition.
+	//
+	// Example:
+	//  func f() {
+	//  	break
+	//  }
+	MisplacedBreak
+
+	// MisplacedContinue occurs when a continue statement is not within a for
+	// loop of the innermost function definition.
+ // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. 
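+ //
+ // (m below has a pointer receiver, so it is in the method set of *T
+ // only; the method expression would have to be written (*T).m and be
+ // applied to a *T value rather than to the literal T{}.)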
+ //
+ // Example:
+ //  type T struct {}
+ //
+ //  func (*T) m() int { return 1 }
+ //
+ //  var _ = T.m(T{})
+ InvalidMethodExpr
+
+ // WrongArgCount occurs when too few or too many arguments are passed in a
+ // function call.
+ //
+ // Example:
+ //  func f(i int) {}
+ //  var x = f()
+ WrongArgCount
+
+ // InvalidCall occurs when an expression is called that is not of function
+ // type.
+ //
+ // Example:
+ //  var x = "x"
+ //  var y = x()
+ InvalidCall
+
+ /* control flow > suspended */
+
+ // UnusedResults occurs when a restricted expression-only built-in function
+ // is suspended via go or defer. Such a suspension discards the results of
+ // these side-effect free built-in functions, and therefore is ineffectual.
+ //
+ // Example:
+ //  func f(a []int) int {
+ //    defer len(a)
+ //    return 0
+ //  }
+ UnusedResults
+
+ // InvalidDefer occurs when a deferred expression is not a function call,
+ // for example if the expression is a type conversion.
+ //
+ // Example:
+ //  func f(i int) int {
+ //    defer int32(i)
+ //    return i
+ //  }
+ InvalidDefer
+
+ // InvalidGo occurs when a go expression is not a function call, for example
+ // if the expression is a type conversion.
+ //
+ // Example:
+ //  func f(i int) int {
+ //    go int32(i)
+ //    return i
+ //  }
+ InvalidGo
+
+ // All codes below were added in Go 1.17.
+
+ /* decl */
+
+ // BadDecl occurs when a declaration has invalid syntax.
+ BadDecl
+
+ // RepeatedDecl occurs when an identifier occurs more than once on the left
+ // hand side of a short variable declaration.
+ //
+ // Example:
+ //  func _() {
+ //    x, y, y := 1, 2, 3
+ //  }
+ RepeatedDecl
+
+ /* unsafe */
+
+ // InvalidUnsafeAdd occurs when unsafe.Add is called with a
+ // length argument that is not of integer type.
+ //
+ // Example:
+ //  import "unsafe"
+ //
+ //  var p unsafe.Pointer
+ //  var _ = unsafe.Add(p, float64(1))
+ InvalidUnsafeAdd
+
+ // InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+ // pointer argument that is not of pointer type or a length argument
+ // that is not of integer type, negative, or out of bounds.
+ //
+ // Example:
+ //  import "unsafe"
+ //
+ //  var x int
+ //  var _ = unsafe.Slice(x, 1)
+ //
+ // Example:
+ //  import "unsafe"
+ //
+ //  var x int
+ //  var _ = unsafe.Slice(&x, float64(1))
+ //
+ // Example:
+ //  import "unsafe"
+ //
+ //  var x int
+ //  var _ = unsafe.Slice(&x, -1)
+ //
+ // Example:
+ //  import "unsafe"
+ //
+ //  var x int
+ //  var _ = unsafe.Slice(&x, uint64(1) << 63)
+ InvalidUnsafeSlice
+
+ // All codes below were added in Go 1.18.
+
+ /* features */
+
+ // UnsupportedFeature occurs when a language feature is used that is not
+ // supported at this Go version.
+ UnsupportedFeature
+
+ /* type params */
+
+ // NotAGenericType occurs when a non-generic type is used where a generic
+ // type is expected: in type or function instantiation.
+ //
+ // Example:
+ //  type T int
+ //
+ //  var _ T[int]
+ NotAGenericType
+
+ // WrongTypeArgCount occurs when a type or function is instantiated with an
+ // incorrect number of type arguments, including when a generic type or
+ // function is used without instantiation.
+ //
+ // Errors involving failed type inference are assigned other error codes.
+ //
+ // Example:
+ //  type T[p any] int
+ //
+ //  var _ T[int, string]
+ //
+ // Example:
+ //  func f[T any]() {}
+ //
+ //  var x = f
+ WrongTypeArgCount
+
+ // CannotInferTypeArgs occurs when type or function type argument inference
+ // fails to infer all type arguments.
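+ //
+ // (Inference works from the types of the ordinary arguments; in the
+ // examples below nothing supplies T or Q, so those type arguments must
+ // be written explicitly, e.g. f[int]() and N[int, string].)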
+ // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted. + // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + +) diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 000000000000..15ecf7c5ded9 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,179 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
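+ // Each check below indexes the length-1 array x with a constant
+ // expression of the form Code-N. The index is in range (zero) only
+ // while the code's value still equals N, so any renumbering of the
+ // constants turns it into the "invalid array index" compile error
+ // named above.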
+ var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNilUse-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = 
x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] +} + +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) + +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) + +func (i ErrorCode) String() string { + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/types.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 000000000000..3c53fbc63b94 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" + + "golang.org/x/tools/go/types/objectpath" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +// ReadGo116ErrorData extracts additional information from types.Error values +// generated by Go version 1.16 and later: the error code, start position, and +// end position. If all positions are valid, start <= err.Pos <= end. +// +// If the data could not be read, the final result parameter will be false. +func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(err) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} + +var SetGoVersion = func(conf *types.Config, version string) bool { return false } + +// NewObjectpathEncoder returns a function closure equivalent to +// objectpath.For but amortized for multiple (sequential) calls. +// It is a temporary workaround, pending the approval of proposal 58668. +// +//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor +func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/types_118.go new file mode 100644 index 000000000000..a42b072a67d3 --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
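For context on `ReadGo116ErrorData` above: the `types.Error` values it decodes are delivered through a `types.Config.Error` callback during type checking. The following is a minimal sketch of that wiring, assuming Go 1.16+; since `typesinternal` is internal to x/tools and not importable here, the comment only marks where its decoding would apply.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	// One deliberate type error: len applied to a struct value.
	src := `package p

var _ = len(struct{}{})
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	conf := types.Config{
		Error: func(err error) {
			// Each diagnostic arrives as a types.Error; on Go 1.16+ this is
			// exactly the value ReadGo116ErrorData would decode into an
			// ErrorCode (InvalidLen here) plus start and end positions.
			if terr, ok := err.(types.Error); ok {
				fmt.Printf("%s: %s\n", fset.Position(terr.Pos), terr.Msg)
			}
		},
	}
	// With an Error callback installed, Check keeps going after the first
	// diagnostic; the returned package and error are not needed here.
	conf.Check("p", fset, []*ast.File{f}, nil)
}
```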
+ +//go:build go1.18 +// +build go1.18 + +package typesinternal + +import ( + "go/types" +) + +func init() { + SetGoVersion = func(conf *types.Config, version string) bool { + conf.GoVersion = version + return true + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/LICENSE b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/LICENSE similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kube-proxy/LICENSE rename to cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/LICENSE diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index bb9d94893176..83774fbcbe71 100644 --- a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/client.proto package annotations @@ -53,6 +53,12 @@ const ( ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 // Street View Org. ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 + // Shopping Org. + ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5 + // Geo Org. + ClientLibraryOrganization_GEO ClientLibraryOrganization = 6 + // Generative AI - https://developers.generativeai.google + ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7 ) // Enum value maps for ClientLibraryOrganization. @@ -63,13 +69,19 @@ var ( 2: "ADS", 3: "PHOTOS", 4: "STREET_VIEW", + 5: "SHOPPING", + 6: "GEO", + 7: "GENERATIVE_AI", } ClientLibraryOrganization_value = map[string]int32{ "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, - "CLOUD": 1, - "ADS": 2, - "PHOTOS": 3, - "STREET_VIEW": 4, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + "SHOPPING": 5, + "GEO": 6, + "GENERATIVE_AI": 7, } ) @@ -370,7 +382,7 @@ type Publishing struct { // A list of API method settings, e.g. the behavior for methods that use the // long-running operation pattern. MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` - // Link to a place that API users can report issues. Example: + // Link to a *public* URI where users can report issues. Example: // https://issuetracker.google.com/issues/new?component=190865&template=1161103 NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` // Link to product home page. 
Example: @@ -1465,42 +1477,44 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, - 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, - 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, - 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, - 0x04, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, - 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, - 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, - 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, - 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, - 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, - 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, + 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, + 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, + 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, + 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, + 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 
0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, + 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, + 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, - 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go new file mode 100644 index 000000000000..1d3f1b5b7efe --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package api + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "google.golang.org/genproto/internal" diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/cluster-autoscaler/vendor/google.golang.org/genproto/internal/doc.go b/cluster-autoscaler/vendor/google.golang.org/genproto/internal/doc.go new file mode 100644 index 000000000000..90e89b4aa3ff --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/genproto/internal/doc.go @@ -0,0 +1,17 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file makes internal an importable go package +// for use with backreferences from submodules. 
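+//
+// The backreference works as in the sibling tidyfix.go files: they
+// blank-import this package behind the modhack build tag, which lets
+// `go mod tidy` record the parent module requirement while the import
+// never reaches a real build.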
+package internal diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go index efbecf02c568..304bbd0744d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -102,10 +102,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{2} + return fileDescriptor_2953ea822e7ffe1e, []int{4} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +189,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } func (*TokenRequestSpec) ProtoMessage() {} func (*TokenRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{3} + return fileDescriptor_2953ea822e7ffe1e, []int{5} } func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +217,7 @@ var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } func (*TokenRequestStatus) ProtoMessage() {} func (*TokenRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{4} + return fileDescriptor_2953ea822e7ffe1e, []int{6} } func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +245,7 @@ var 
xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{5} + return fileDescriptor_2953ea822e7ffe1e, []int{7} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +273,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{6} + return fileDescriptor_2953ea822e7ffe1e, []int{8} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +301,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{7} + return fileDescriptor_2953ea822e7ffe1e, []int{9} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +329,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{8} + return fileDescriptor_2953ea822e7ffe1e, []int{10} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,6 +357,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1.SelfSubjectReviewStatus") proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest") proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec") proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus") @@ -316,64 +374,67 @@ func init() { } var fileDescriptor_2953ea822e7ffe1e = []byte{ - // 907 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x96, 0xa4, 0x78, 0x25, - 0x54, 0x01, 0x6b, 0x6f, 0x23, 0x04, 0xab, 0x45, 0x42, 0xaa, 0x69, 0x04, 0x11, 0x82, 0x5d, 0xcd, - 0x6e, 0x0b, 0xe2, 0xc4, 0xc4, 0x7e, 0x4d, 0x87, 0xe0, 0xb1, 0xb1, 0xc7, 0x61, 0x73, 0xdb, 0x3f, - 0x81, 0x23, 0x48, 0x1c, 0xf8, 0x23, 0x90, 0xf8, 0x17, 0x7a, 0x5c, 0x71, 0xea, 0x01, 0x45, 0xd4, - 0x5c, 0x39, 0x72, 0xe2, 0x84, 0x66, 0x3c, 0xad, 0xe3, 0xa4, 0x4d, 0x73, 0xe2, 0x96, 0x79, 0xef, - 0x7b, 0xdf, 0xbc, 0xf7, 0xcd, 0xe7, 0x99, 0xa0, 0xde, 0xe8, 0x61, 0x6c, 0xb1, 0xc0, 0x1e, 0x25, - 0x03, 0x88, 0x38, 0x08, 0x88, 0xed, 0x31, 0x70, 0x2f, 0x88, 0x6c, 0x9d, 0xa0, 0x21, 0xb3, 0x69, - 0x22, 0x4e, 0x80, 0x0b, 0xe6, 0x52, 0xc1, 0x02, 0x6e, 0x8f, 0xf7, 0xec, 0x21, 0x70, 0x88, 0xa8, - 0x00, 0xcf, 0x0a, 0xa3, 0x40, 0x04, 0xf8, 0x6e, 0x86, 0xb6, 0x68, 0xc8, 0xac, 0x22, 0xda, 0x1a, - 0xef, 0x6d, 0xdf, 0x1f, 0x32, 0x71, 0x92, 0x0c, 
0x2c, 0x37, 0xf0, 0xed, 0x61, 0x30, 0x0c, 0x6c, - 0x55, 0x34, 0x48, 0x8e, 0xd5, 0x4a, 0x2d, 0xd4, 0xaf, 0x8c, 0x6c, 0xfb, 0xdd, 0x7c, 0x6b, 0x9f, - 0xba, 0x27, 0x8c, 0x43, 0x34, 0xb1, 0xc3, 0xd1, 0x50, 0x06, 0x62, 0xdb, 0x07, 0x41, 0xaf, 0x68, - 0x61, 0xdb, 0xbe, 0xae, 0x2a, 0x4a, 0xb8, 0x60, 0x3e, 0x2c, 0x14, 0xbc, 0x77, 0x53, 0x41, 0xec, - 0x9e, 0x80, 0x4f, 0xe7, 0xeb, 0xcc, 0xdf, 0x0d, 0xf4, 0xaa, 0x13, 0x24, 0xdc, 0x7b, 0x3c, 0xf8, - 0x06, 0x5c, 0x41, 0xe0, 0x18, 0x22, 0xe0, 0x2e, 0xe0, 0x1d, 0x54, 0x1d, 0x31, 0xee, 0xb5, 0x8c, - 0x1d, 0x63, 0xb7, 0xe1, 0xdc, 0x3a, 0x9d, 0x76, 0x4a, 0xe9, 0xb4, 0x53, 0xfd, 0x94, 0x71, 0x8f, - 0xa8, 0x0c, 0xee, 0x22, 0x44, 0x43, 0x76, 0x04, 0x51, 0xcc, 0x02, 0xde, 0x2a, 0x2b, 0x1c, 0xd6, - 0x38, 0xb4, 0xff, 0xa4, 0xaf, 0x33, 0x64, 0x06, 0x25, 0x59, 0x39, 0xf5, 0xa1, 0x55, 0x29, 0xb2, - 0x7e, 0x4e, 0x7d, 0x20, 0x2a, 0x83, 0x1d, 0x54, 0x49, 0xfa, 0x07, 0xad, 0xaa, 0x02, 0x3c, 0xd0, - 0x80, 0xca, 0x61, 0xff, 0xe0, 0xdf, 0x69, 0xe7, 0x8d, 0xeb, 0x86, 0x14, 0x93, 0x10, 0x62, 0xeb, - 0xb0, 0x7f, 0x40, 0x64, 0xb1, 0xf9, 0x3e, 0x42, 0xbd, 0xe7, 0x22, 0xa2, 0x47, 0xf4, 0xdb, 0x04, - 0x70, 0x07, 0xd5, 0x98, 0x00, 0x3f, 0x6e, 0x19, 0x3b, 0x95, 0xdd, 0x86, 0xd3, 0x48, 0xa7, 0x9d, - 0x5a, 0x5f, 0x06, 0x48, 0x16, 0x7f, 0x54, 0xff, 0xf1, 0x97, 0x4e, 0xe9, 0xc5, 0x1f, 0x3b, 0x25, - 0xf3, 0xe7, 0x32, 0xba, 0xf5, 0x2c, 0x18, 0x01, 0x27, 0xf0, 0x5d, 0x02, 0xb1, 0xc0, 0x5f, 0xa3, - 0xba, 0x3c, 0x22, 0x8f, 0x0a, 0xaa, 0x94, 0x68, 0x76, 0x1f, 0x58, 0xb9, 0x3b, 0x2e, 0x9b, 0xb0, - 0xc2, 0xd1, 0x50, 0x06, 0x62, 0x4b, 0xa2, 0xad, 0xf1, 0x9e, 0x95, 0xc9, 0xf9, 0x19, 0x08, 0x9a, - 0x6b, 0x92, 0xc7, 0xc8, 0x25, 0x2b, 0x7e, 0x82, 0xaa, 0x71, 0x08, 0xae, 0xd2, 0xaf, 0xd9, 0xb5, - 0xac, 0x65, 0xde, 0xb3, 0x66, 0x7b, 0x7b, 0x1a, 0x82, 0x9b, 0x2b, 0x28, 0x57, 0x44, 0x31, 0xe1, - 0x2f, 0xd1, 0x5a, 0x2c, 0xa8, 0x48, 0x62, 0xa5, 0x72, 0xb1, 0xe3, 0x9b, 0x38, 0x55, 0x9d, 0xb3, - 0xa1, 0x59, 0xd7, 0xb2, 0x35, 0xd1, 0x7c, 0xe6, 0x3f, 0x06, 0xda, 0x9c, 0x6f, 0x01, 0xbf, 0x8d, - 0x1a, 0x34, 0xf1, 0x98, 0x34, 0xcd, 0x85, 0xc4, 0xeb, 0xe9, 0xb4, 0xd3, 0xd8, 0xbf, 0x08, 0x92, - 0x3c, 0x8f, 0x3f, 0x42, 0x5b, 0xf0, 0x3c, 0x64, 0x91, 0xda, 0xfd, 0x29, 0xb8, 0x01, 0xf7, 0x62, - 0x75, 0xd6, 0x15, 0xe7, 0x4e, 0x3a, 0xed, 0x6c, 0xf5, 0xe6, 0x93, 0x64, 0x11, 0x8f, 0x39, 0xda, - 0x18, 0x14, 0x2c, 0xab, 0x07, 0xed, 0x2e, 0x1f, 0xf4, 0x2a, 0x9b, 0x3b, 0x38, 0x9d, 0x76, 0x36, - 0x8a, 0x19, 0x32, 0xc7, 0x6e, 0xfe, 0x6a, 0x20, 0xbc, 0xa8, 0x12, 0xbe, 0x87, 0x6a, 0x42, 0x46, - 0xf5, 0x27, 0xb2, 0xae, 0x45, 0xab, 0x65, 0xd0, 0x2c, 0x87, 0x27, 0xe8, 0x76, 0x3e, 0xc0, 0x33, - 0xe6, 0x43, 0x2c, 0xa8, 0x1f, 0xea, 0xd3, 0x7e, 0x6b, 0x35, 0x2f, 0xc9, 0x32, 0xe7, 0x35, 0x4d, - 0x7f, 0xbb, 0xb7, 0x48, 0x47, 0xae, 0xda, 0xc3, 0xfc, 0xa9, 0x8c, 0x9a, 0xba, 0xed, 0x31, 0x83, - 0xef, 0xff, 0x07, 0x2f, 0x3f, 0x2e, 0x78, 0xf9, 0xfe, 0x4a, 0xbe, 0x93, 0xad, 0x5d, 0x6b, 0xe5, - 0x2f, 0xe6, 0xac, 0x6c, 0xaf, 0x4e, 0xb9, 0xdc, 0xc9, 0x2e, 0x7a, 0x65, 0x6e, 0xff, 0xd5, 0x8e, - 0xb3, 0x60, 0xf6, 0xf2, 0x72, 0xb3, 0x9b, 0x7f, 0x1b, 0x68, 0x6b, 0xa1, 0x25, 0xfc, 0x01, 0x5a, - 0x9f, 0xe9, 0x1c, 0xb2, 0x1b, 0xb6, 0xee, 0xdc, 0xd1, 0xfb, 0xad, 0xef, 0xcf, 0x26, 0x49, 0x11, - 0x8b, 0x3f, 0x41, 0xd5, 0x24, 0x86, 0x48, 0x2b, 0xfc, 0xe6, 0x72, 0x39, 0x0e, 0x63, 0x88, 0xfa, - 0xfc, 0x38, 0xc8, 0xa5, 0x95, 0x11, 0xa2, 0x18, 0x8a, 0x93, 0x54, 0x6f, 0xf8, 0x6c, 0xef, 0xa1, - 0x1a, 0x44, 0x51, 0x10, 0xe9, 0x7b, 0xfb, 0x52, 0x9b, 0x9e, 0x0c, 0x92, 0x2c, 0x67, 0xfe, 0x56, - 0x46, 0xf5, 0x8b, 0x2d, 0xf1, 0x3b, 0xa8, 0x2e, 0xb7, 0x51, 0x97, 0x7d, 
0x26, 0xe8, 0xa6, 0x2e, - 0x52, 0x18, 0x19, 0x27, 0x97, 0x08, 0xfc, 0x3a, 0xaa, 0x24, 0xcc, 0xd3, 0x6f, 0x48, 0x73, 0xe6, - 0xd2, 0x27, 0x32, 0x8e, 0x4d, 0xb4, 0x36, 0x8c, 0x82, 0x24, 0x94, 0x36, 0x90, 0x8d, 0x22, 0x79, - 0xa2, 0x1f, 0xab, 0x08, 0xd1, 0x19, 0x7c, 0x84, 0x6a, 0x20, 0xef, 0x7c, 0x35, 0x4b, 0xb3, 0xbb, - 0xb7, 0x9a, 0x34, 0x96, 0x7a, 0x27, 0x7a, 0x5c, 0x44, 0x93, 0x99, 0xa9, 0x64, 0x8c, 0x64, 0x74, - 0xdb, 0x03, 0xfd, 0x96, 0x28, 0x0c, 0xde, 0x44, 0x95, 0x11, 0x4c, 0xb2, 0x89, 0x88, 0xfc, 0x89, - 0x3f, 0x44, 0xb5, 0xb1, 0x7c, 0x66, 0xf4, 0x91, 0xec, 0x2e, 0xdf, 0x37, 0x7f, 0x96, 0x48, 0x56, - 0xf6, 0xa8, 0xfc, 0xd0, 0x70, 0x9c, 0xd3, 0xf3, 0x76, 0xe9, 0xe5, 0x79, 0xbb, 0x74, 0x76, 0xde, - 0x2e, 0xbd, 0x48, 0xdb, 0xc6, 0x69, 0xda, 0x36, 0x5e, 0xa6, 0x6d, 0xe3, 0x2c, 0x6d, 0x1b, 0x7f, - 0xa6, 0x6d, 0xe3, 0x87, 0xbf, 0xda, 0xa5, 0xaf, 0xee, 0x2e, 0xfb, 0x13, 0xf3, 0x5f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x12, 0xb8, 0x31, 0x91, 0xfc, 0x08, 0x00, 0x00, + // 958 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0x45, + 0x10, 0xf6, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0x48, 0x7a, 0x59, 0x61, 0x85, 0xc5, 0x0e, 0xb3, 0x12, + 0x8a, 0x80, 0x9d, 0xd9, 0x58, 0x3c, 0x56, 0x8b, 0x84, 0x94, 0x21, 0x16, 0x58, 0x08, 0x76, 0xd5, + 0x4e, 0x02, 0x42, 0x42, 0xa2, 0x3d, 0xae, 0x38, 0x83, 0x77, 0x1e, 0xcc, 0xf4, 0x98, 0xf5, 0x6d, + 0x7f, 0x02, 0x47, 0x90, 0x38, 0xf0, 0x23, 0x90, 0xf8, 0x0b, 0x39, 0xae, 0x10, 0x87, 0x3d, 0x20, + 0x8b, 0x0c, 0x57, 0x8e, 0x9c, 0x38, 0xa1, 0xee, 0xe9, 0xf8, 0x99, 0x4c, 0x7c, 0xda, 0x9b, 0xa7, + 0x1e, 0x5f, 0x55, 0x7d, 0x55, 0x5d, 0x65, 0x68, 0x0d, 0xee, 0x47, 0x86, 0xe3, 0x9b, 0x83, 0xb8, + 0x8b, 0xa1, 0x87, 0x1c, 0x23, 0x73, 0x88, 0x5e, 0xcf, 0x0f, 0x4d, 0xa5, 0x60, 0x81, 0x63, 0xb2, + 0x98, 0x9f, 0xa2, 0xc7, 0x1d, 0x9b, 0x71, 0xc7, 0xf7, 0xcc, 0xe1, 0x9e, 0xd9, 0x47, 0x0f, 0x43, + 0xc6, 0xb1, 0x67, 0x04, 0xa1, 0xcf, 0x7d, 0x72, 0x3b, 0xb5, 0x36, 0x58, 0xe0, 0x18, 0xf3, 0xd6, + 0xc6, 0x70, 0x6f, 0xfb, 0x6e, 0xdf, 0xe1, 0xa7, 0x71, 0xd7, 0xb0, 0x7d, 0xd7, 0xec, 0xfb, 0x7d, + 0xdf, 0x94, 0x4e, 0xdd, 0xf8, 0x44, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0x05, 0xdb, 0x7e, 0x67, 0x1a, + 0xda, 0x65, 0xf6, 0xa9, 0xe3, 0x61, 0x38, 0x32, 0x83, 0x41, 0x5f, 0x08, 0x22, 0xd3, 0x45, 0xce, + 0x2e, 0x49, 0x61, 0xdb, 0xbc, 0xca, 0x2b, 0x8c, 0x3d, 0xee, 0xb8, 0xb8, 0xe4, 0xf0, 0xde, 0x75, + 0x0e, 0x91, 0x7d, 0x8a, 0x2e, 0x5b, 0xf4, 0xd3, 0x7f, 0xd7, 0xe0, 0x65, 0xcb, 0x8f, 0xbd, 0xde, + 0xc3, 0xee, 0xb7, 0x68, 0x73, 0x8a, 0x27, 0x18, 0xa2, 0x67, 0x23, 0xd9, 0x81, 0xe2, 0xc0, 0xf1, + 0x7a, 0x35, 0x6d, 0x47, 0xdb, 0xad, 0x58, 0x37, 0xce, 0xc6, 0x8d, 0x5c, 0x32, 0x6e, 0x14, 0x3f, + 0x75, 0xbc, 0x1e, 0x95, 0x1a, 0xd2, 0x04, 0x60, 0x81, 0x73, 0x8c, 0x61, 0xe4, 0xf8, 0x5e, 0x2d, + 0x2f, 0xed, 0x88, 0xb2, 0x83, 0xfd, 0x47, 0x6d, 0xa5, 0xa1, 0x33, 0x56, 0x02, 0xd5, 0x63, 0x2e, + 0xd6, 0x0a, 0xf3, 0xa8, 0x9f, 0x33, 0x17, 0xa9, 0xd4, 0x10, 0x0b, 0x0a, 0x71, 0xfb, 0xa0, 0x56, + 0x94, 0x06, 0xf7, 0x94, 0x41, 0xe1, 0xa8, 0x7d, 0xf0, 0xdf, 0xb8, 0xf1, 0xfa, 0x55, 0x45, 0xf2, + 0x51, 0x80, 0x91, 0x71, 0xd4, 0x3e, 0xa0, 0xc2, 0x59, 0x7f, 0x1f, 0xa0, 0xf5, 0x84, 0x87, 0xec, + 0x98, 0x3d, 0x8e, 0x91, 0x34, 0xa0, 0xe4, 0x70, 0x74, 0xa3, 0x9a, 0xb6, 0x53, 0xd8, 0xad, 0x58, + 0x95, 0x64, 0xdc, 0x28, 0xb5, 0x85, 0x80, 0xa6, 0xf2, 0x07, 0xe5, 0x1f, 0x7f, 0x69, 0xe4, 0x9e, + 0xfe, 0xb9, 0x93, 0xd3, 0xff, 0xd0, 0x60, 0xab, 0x83, 0x8f, 0x4f, 0x3a, 0xb1, 0x62, 0x63, 0xe8, + 0xe0, 0xf7, 0xe4, 0x1b, 0x28, 0x8b, 0x3e, 0xf5, 0x18, 0x67, 0x92, 0x8e, 0x6a, 
0xf3, 0x9e, 0x31, + 0x1d, 0x91, 0x49, 0x26, 0x46, 0x30, 0xe8, 0x0b, 0x41, 0x64, 0x08, 0x6b, 0x63, 0xb8, 0x67, 0xa4, + 0x9c, 0x7e, 0x86, 0x9c, 0x4d, 0x89, 0x99, 0xca, 0xe8, 0x04, 0x95, 0x7c, 0x0d, 0x6b, 0x11, 0x67, + 0x3c, 0x8e, 0x24, 0x8d, 0xd5, 0xe6, 0xbb, 0x46, 0xd6, 0x08, 0x1a, 0x4b, 0x29, 0x76, 0xa4, 0xb3, + 0xb5, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x54, 0x81, 0xea, 0x3e, 0xbc, 0x72, 0x85, 0x0b, 0x39, 0x84, + 0x72, 0x1c, 0x61, 0xd8, 0xf6, 0x4e, 0x7c, 0x55, 0xdb, 0x1b, 0xd9, 0xb1, 0x8f, 0x94, 0xb5, 0xb5, + 0xa9, 0x82, 0x95, 0x2f, 0x24, 0x74, 0x82, 0xa4, 0xff, 0x9c, 0x87, 0x1b, 0x87, 0xfe, 0x00, 0x3d, + 0x8a, 0xdf, 0xc5, 0x18, 0xf1, 0x17, 0x40, 0xe1, 0x23, 0x28, 0x46, 0x01, 0xda, 0x8a, 0x40, 0x23, + 0xbb, 0x88, 0xd9, 0xdc, 0x3a, 0x01, 0xda, 0xd3, 0x49, 0x14, 0x5f, 0x54, 0x22, 0x91, 0x2f, 0x27, + 0x4d, 0x29, 0x2c, 0x65, 0x7c, 0x1d, 0x66, 0x76, 0x3f, 0xfe, 0xd5, 0x60, 0x73, 0x31, 0x05, 0xf2, + 0x16, 0x54, 0x58, 0xdc, 0x73, 0xc4, 0xe3, 0xbb, 0x18, 0xd5, 0xf5, 0x64, 0xdc, 0xa8, 0xec, 0x5f, + 0x08, 0xe9, 0x54, 0x4f, 0x3e, 0x82, 0x2d, 0x7c, 0x12, 0x38, 0xa1, 0x8c, 0xde, 0x41, 0xdb, 0xf7, + 0x7a, 0x91, 0x7c, 0x33, 0x05, 0xeb, 0x56, 0x32, 0x6e, 0x6c, 0xb5, 0x16, 0x95, 0x74, 0xd9, 0x9e, + 0x78, 0xb0, 0xd1, 0x9d, 0x7b, 0xfa, 0xaa, 0xd0, 0x66, 0x76, 0xa1, 0x97, 0xad, 0x0b, 0x8b, 0x24, + 0xe3, 0xc6, 0xc6, 0xbc, 0x86, 0x2e, 0xa0, 0xeb, 0xbf, 0x6a, 0x40, 0x96, 0x59, 0x22, 0x77, 0xa0, + 0xc4, 0x85, 0x54, 0xad, 0x9a, 0x75, 0x45, 0x5a, 0x29, 0x35, 0x4d, 0x75, 0x64, 0x04, 0x37, 0xa7, + 0x05, 0x1c, 0x3a, 0x2e, 0x46, 0x9c, 0xb9, 0x81, 0xea, 0xf6, 0x9b, 0xab, 0xcd, 0x92, 0x70, 0xb3, + 0x5e, 0x55, 0xf0, 0x37, 0x5b, 0xcb, 0x70, 0xf4, 0xb2, 0x18, 0xfa, 0x4f, 0x79, 0xa8, 0xaa, 0xb4, + 0x5f, 0xd0, 0x3a, 0x78, 0x38, 0x37, 0xcb, 0x77, 0x57, 0x9a, 0x3b, 0xf9, 0xa6, 0xaf, 0x1a, 0xe5, + 0x2f, 0x16, 0x46, 0xd9, 0x5c, 0x1d, 0x32, 0x7b, 0x92, 0x6d, 0x78, 0x69, 0x21, 0xfe, 0x6a, 0xed, + 0x9c, 0x1b, 0xf6, 0x7c, 0xf6, 0xb0, 0xeb, 0xff, 0x68, 0xb0, 0xb5, 0x94, 0x12, 0xf9, 0x00, 0xd6, + 0x67, 0x32, 0xc7, 0xf4, 0x52, 0x95, 0xad, 0x5b, 0x2a, 0xde, 0xfa, 0xfe, 0xac, 0x92, 0xce, 0xdb, + 0x92, 0x4f, 0xa0, 0x28, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37, 0xa1, 0x56, 0x48, 0xa8, 0x44, + 0x98, 0xaf, 0xa4, 0x78, 0xcd, 0xb3, 0xbd, 0x03, 0x25, 0x0c, 0x43, 0x3f, 0x54, 0xf7, 0x6f, 0xc2, + 0x4d, 0x4b, 0x08, 0x69, 0xaa, 0xd3, 0x7f, 0xcb, 0xc3, 0x64, 0xa7, 0x92, 0xb7, 0xd3, 0xfd, 0x2c, + 0x8f, 0x66, 0x4a, 0xe8, 0xdc, 0xde, 0x15, 0x72, 0x3a, 0xb1, 0x20, 0xaf, 0x41, 0x21, 0x76, 0x7a, + 0xea, 0x16, 0x57, 0x67, 0x8e, 0x27, 0x15, 0x72, 0xa2, 0xc3, 0x5a, 0x3f, 0xf4, 0xe3, 0x40, 0x8c, + 0x81, 0x48, 0x14, 0x44, 0x47, 0x3f, 0x96, 0x12, 0xaa, 0x34, 0xe4, 0x18, 0x4a, 0x28, 0x6e, 0xa7, + 0xac, 0xa5, 0xda, 0xdc, 0x5b, 0x8d, 0x1a, 0x43, 0xde, 0xdb, 0x96, 0xc7, 0xc3, 0xd1, 0x4c, 0x55, + 0x42, 0x46, 0x53, 0xb8, 0xed, 0xae, 0xba, 0xc9, 0xd2, 0x86, 0x6c, 0x42, 0x61, 0x80, 0xa3, 0xb4, + 0x22, 0x2a, 0x7e, 0x92, 0x0f, 0xa1, 0x34, 0x14, 0xe7, 0x5a, 0xb5, 0x64, 0x37, 0x3b, 0xee, 0xf4, + 0xbc, 0xd3, 0xd4, 0xed, 0x41, 0xfe, 0xbe, 0x66, 0x59, 0x67, 0xe7, 0xf5, 0xdc, 0xb3, 0xf3, 0x7a, + 0xee, 0xf9, 0x79, 0x3d, 0xf7, 0x34, 0xa9, 0x6b, 0x67, 0x49, 0x5d, 0x7b, 0x96, 0xd4, 0xb5, 0xe7, + 0x49, 0x5d, 0xfb, 0x2b, 0xa9, 0x6b, 0x3f, 0xfc, 0x5d, 0xcf, 0x7d, 0x75, 0x3b, 0xeb, 0xcf, 0xe0, + 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x9a, 0x38, 0x17, 0x44, 0x0a, 0x00, 0x00, } func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { @@ -451,6 +512,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -850,6 +987,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenRequest) Size() (n int) { if m == nil { return 0 @@ -999,6 +1160,27 @@ func (this *BoundObjectReference) String() string { }, "") return s } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenRequest) String() string { if this == nil { return "nil" @@ -1361,6 +1543,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto index f4806a3c6361..1632070c872c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto @@ -56,6 +56,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional UserInfo userInfo = 1; +} + // TokenRequest requests a token for a given service account. message TokenRequest { // Standard object's metadata. diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/register.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/register.go index c522e4a46d71..6a32b5926b3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/register.go @@ -46,6 +46,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, &TokenRequest{}, + &SelfSubjectReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go index 4e221e58c7d9..b498007c000c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go @@ -197,3 +197,28 @@ type BoundObjectReference struct { // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"` } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index b1a730b816ee..ebfd4852c058 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -39,6 +39,25 @@ func (BoundObjectReference) SwaggerDoc() map[string]string { return map_BoundObjectReference } +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenRequest = map[string]string{ "": "TokenRequest requests a token for a given service account.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index 2af533191ba3..369c89b8631b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -61,6 +61,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. 
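For reference, the `SelfSubjectReview` type vendored above is create-only (`+genclient:onlyVerbs=create`): a client POSTs an empty object and the kube-apiserver fills in `status.userInfo`. A minimal sketch of exercising it through a typed clientset — this assumes a client-go release whose `AuthenticationV1()` client includes these v1 types, and the kubeconfig setup is purely illustrative:

```go
package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the local kubeconfig (illustrative setup).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// SelfSubjectReview is create-only: send an empty object and the
	// apiserver returns the user attributes it sees for this request.
	review, err := clientset.AuthenticationV1().SelfSubjectReviews().Create(
		context.TODO(), &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("authenticated as %q (groups: %v)\n",
		review.Status.UserInfo.Username, review.Status.UserInfo.Groups)
}
```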
+func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto index 181c79597da6..df4381c737fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto @@ -213,8 +213,8 @@ message JobSpec { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is alpha-level. To use this field, you must enable the - // `JobPodFailurePolicy` feature gate (disabled by default). + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). // +optional optional PodFailurePolicy podFailurePolicy = 11; diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go index 346676b09515..22cf9ee9cb61 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go @@ -252,8 +252,8 @@ type JobSpec struct { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is alpha-level. To use this field, you must enable the - // `JobPodFailurePolicy` feature gate (disabled by default). + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). // +optional PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 1f28f006cc7a..f6f3141f1897 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -115,7 +115,7 @@ var map_JobSpec = map[string]string{ "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", - "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is alpha-level. To use this field, you must enable the `JobPodFailurePolicy` feature gate (disabled by default).", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 61f86f850a3b..106ba14c3df1 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -56,9 +56,9 @@ const ( // AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. 
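Since the batch/v1 hunks above graduate `JobPodFailurePolicy` to beta (enabled by default), a short sketch of what the field looks like in Go, using only types from the vendored `k8s.io/api/batch/v1` package; the exit code value and the choice of rule are illustrative:

```go
package main

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobSpecWithPodFailurePolicy returns a JobSpec fragment that fails the Job
// outright when a container exits with code 42, instead of counting the
// failure against backoffLimit. Note the restartPolicy: podFailurePolicy
// cannot be combined with restartPolicy=OnFailure.
func jobSpecWithPodFailurePolicy(template corev1.PodTemplateSpec) batchv1.JobSpec {
	template.Spec.RestartPolicy = corev1.RestartPolicyNever
	return batchv1.JobSpec{
		Template: template,
		PodFailurePolicy: &batchv1.PodFailurePolicy{
			Rules: []batchv1.PodFailurePolicyRule{{
				Action: batchv1.PodFailurePolicyActionFailJob,
				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
					Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
					Values:   []int32{42},
				},
			}},
		},
	}
}
```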
AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // AppArmorBetaDefaultProfileAnnotatoinKey is the annotation key specifying the default AppArmor profile. + // AppArmorBetaDefaultProfileAnnotationKey is the annotation key specifying the default AppArmor profile. AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" - // AppArmorBetaAllowedProfileAnnotationKey is the annotation key specifying the allowed AppArmor profiles. + // AppArmorBetaAllowedProfilesAnnotationKey is the annotation key specifying the allowed AppArmor profiles. AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames" // AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. @@ -78,7 +78,7 @@ const ( // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache // an object (e.g. secret, config map) before fetching it again from apiserver. // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto index 94e0a71156cc..7913bbc05568 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto @@ -1853,7 +1853,8 @@ message HTTPGetAction { // HTTPHeader describes a custom header to be used in HTTP probes message HTTPHeader { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. optional string name = 1; // The header field value @@ -4752,7 +4753,7 @@ message SeccompProfile { // localhostProfile indicates a profile defined in a file on the node should be used. // The profile must be preconfigured on the node to work. // Must be a descending path, relative to the kubelet's configured seccomp profile location. - // Must only be set if type is "Localhost". + // Must be set if type is "Localhost". Must NOT be set for any other type. // +optional optional string localhostProfile = 2; } @@ -5274,10 +5275,9 @@ message ServiceSpec { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional optional string loadBalancerIP = 8; @@ -6052,12 +6052,9 @@ message WindowsSecurityContextOptions { optional string runAsUserName = 3; // HostProcess determines if a container should be run as a 'Host Process' container. 
- // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. // +optional optional bool hostProcess = 4; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go index c9bb18a2cc77..0f699d25ca67 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go @@ -2137,7 +2137,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The header field value Value string `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -3837,7 +3838,7 @@ type SeccompProfile struct { // localhostProfile indicates a profile defined in a file on the node should be used. // The profile must be preconfigured on the node to work. // Must be a descending path, relative to the kubelet's configured seccomp profile location. - // Must only be set if type is "Localhost". + // Must be set if type is "Localhost". Must NOT be set for any other type. // +optional LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"` } @@ -4713,10 +4714,9 @@ type ServiceSpec struct { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` @@ -6801,12 +6801,9 @@ type WindowsSecurityContextOptions struct { RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` // HostProcess determines if a container should be run as a 'Host Process' container. - // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. 
All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. // +optional HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index a2cf00db87a7..7751e79ee4cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -832,7 +832,7 @@ func (HTTPGetAction) SwaggerDoc() map[string]string { var map_HTTPHeader = map[string]string{ "": "HTTPHeader describes a custom header to be used in HTTP probes", - "name": "The header field name", + "name": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", "value": "The header field value", } @@ -2134,7 +2134,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_SeccompProfile = map[string]string{ "": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", "type": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", - "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".", + "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type.", } func (SeccompProfile) SwaggerDoc() map[string]string { @@ -2329,7 +2329,7 @@ var map_ServiceSpec = map[string]string{ "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. 
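To make two of the core/v1 documentation changes above concrete — HTTP probe header names being canonicalized on output, and `localhostProfile` now documented as required exactly when `type` is `Localhost` — a sketch using the vendored corev1 types; the profile path, port, and header value are placeholders:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func exampleSeccompAndProbe() (corev1.SeccompProfile, corev1.Probe) {
	// localhostProfile must be set when (and only when) Type is Localhost;
	// the path is relative to the kubelet's seccomp profile directory.
	profilePath := "profiles/audit.json" // placeholder path
	seccomp := corev1.SeccompProfile{
		Type:             corev1.SeccompProfileTypeLocalhost,
		LocalhostProfile: &profilePath,
	}

	// Probe header names are canonicalized on output, so "x-custom-header"
	// and "X-Custom-Header" are understood as the same header.
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/healthz",
				Port: intstr.FromInt(8080),
				HTTPHeaders: []corev1.HTTPHeader{
					{Name: "X-Custom-Header", Value: "probe"},
				},
			},
		},
	}
	return seccomp, probe
}
```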
Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations, and it cannot support dual-stack. As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. This field may be removed in a future API version.", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/", "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", "externalTrafficPolicy": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). 
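Given the reworded `loadBalancerIP` deprecation above, a sketch of the recommended direction: leave `spec.loadBalancerIP` unset and use an implementation-specific annotation instead. The annotation key below is MetalLB's and serves only as an example — each load-balancer implementation defines its own, and the IP is a placeholder:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func loadBalancerService() corev1.Service {
	return corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-lb",
			Annotations: map[string]string{
				// Implementation-specific replacement for spec.loadBalancerIP;
				// this particular key is understood by MetalLB (example only).
				"metallb.universe.tf/loadBalancerIPs": "192.0.2.10",
			},
		},
		Spec: corev1.ServiceSpec{
			Type: corev1.ServiceTypeLoadBalancer,
			// LoadBalancerIP is deliberately left unset: deprecated and non-portable.
			Ports: []corev1.ServicePort{{Port: 80}},
		},
	}
}
```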
Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.", @@ -2612,7 +2612,7 @@ var map_WindowsSecurityContextOptions = map[string]string{ "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", + "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
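And for the `hostProcess` doc rewrite here (the alpha feature-gate caveats dropped now that the field is generally usable), a minimal PodSpec sketch showing the two invariants the remaining text documents: every container shares the same effective HostProcess value, and HostNetwork must be true. Container name and image are illustrative:

```go
package main

import corev1 "k8s.io/api/core/v1"

func hostProcessPodSpec() corev1.PodSpec {
	hostProcess := true
	return corev1.PodSpec{
		// If HostProcess is true, HostNetwork must also be true.
		HostNetwork: true,
		SecurityContext: &corev1.PodSecurityContext{
			// Set at the pod level so every container has the same effective
			// HostProcess value (mixing HostProcess and non-HostProcess
			// containers in one Pod is not allowed).
			WindowsOptions: &corev1.WindowsSecurityContextOptions{
				HostProcess: &hostProcess,
			},
		},
		Containers: []corev1.Container{{
			Name:  "hostprocess-container",                   // illustrative
			Image: "example.invalid/windows-agent:latest",    // illustrative
		}},
	}
}
```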
In addition, if HostProcess is true then HostNetwork must also be set to true.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 863ebbc4a722..d967e3810683 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -1001,38 +1001,10 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } -func (*NetworkPolicyStatus) ProtoMessage() {} -func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} -} -func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyStatus.Merge(m, src) -} -func (m *NetworkPolicyStatus) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo - func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_cdc93917efc28165, []int{34} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1032,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_cdc93917efc28165, []int{35} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1060,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} + return fileDescriptor_cdc93917efc28165, []int{36} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} + return fileDescriptor_cdc93917efc28165, []int{37} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} + return fileDescriptor_cdc93917efc28165, []int{38} } func (m *ReplicaSetStatus) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -1172,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_cdc93917efc28165, []int{39} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1172,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} + return fileDescriptor_cdc93917efc28165, []int{40} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1200,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} + return fileDescriptor_cdc93917efc28165, []int{41} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} + return fileDescriptor_cdc93917efc28165, []int{42} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1256,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_cdc93917efc28165, []int{43} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1284,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{44} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1373,7 +1345,6 @@ func init() { proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1393,188 +1364,186 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 2890 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47, - 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 
0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76, - 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x33, 0xc3, 0x6e, 0x92, 0x25, 0x3f, 0xa4, 0x84, 0x1d, 0xef, 0x26, - 0xeb, 0xc4, 0x1e, 0x4f, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9, - 0x1e, 0x75, 0xd7, 0x98, 0x35, 0x27, 0x10, 0x5c, 0x72, 0x82, 0x4b, 0x20, 0x47, 0x10, 0x12, 0x57, - 0xae, 0x1c, 0x42, 0x04, 0x22, 0x48, 0x2b, 0xc4, 0x21, 0x12, 0x07, 0x72, 0xb2, 0x88, 0x73, 0x42, - 0xfc, 0x03, 0x68, 0x4f, 0xa8, 0x7e, 0x74, 0xf5, 0x6f, 0xbb, 0xc7, 0x38, 0x16, 0x41, 0x9c, 0x3c, - 0x5d, 0xef, 0xbd, 0x4f, 0xbd, 0xaa, 0x7a, 0xf5, 0xde, 0xa7, 0xba, 0xda, 0xf0, 0xf2, 0xee, 0xb3, - 0x7e, 0xcd, 0x72, 0xeb, 0xbb, 0xc3, 0x2d, 0xe2, 0x39, 0x84, 0x12, 0xbf, 0xbe, 0x47, 0x9c, 0xae, - 0xeb, 0xd5, 0xa5, 0xc0, 0x18, 0x58, 0x75, 0x72, 0x8f, 0x12, 0xc7, 0xb7, 0x5c, 0xc7, 0xaf, 0xef, - 0x5d, 0xdf, 0x22, 0xd4, 0xb8, 0x5e, 0xef, 0x11, 0x87, 0x78, 0x06, 0x25, 0xdd, 0xda, 0xc0, 0x73, - 0xa9, 0x8b, 0x1e, 0x11, 0xea, 0x35, 0x63, 0x60, 0xd5, 0x42, 0xf5, 0x9a, 0x54, 0x5f, 0x7c, 0xb2, - 0x67, 0xd1, 0x9d, 0xe1, 0x56, 0xcd, 0x74, 0xfb, 0xf5, 0x9e, 0xdb, 0x73, 0xeb, 0xdc, 0x6a, 0x6b, - 0xb8, 0xcd, 0x9f, 0xf8, 0x03, 0xff, 0x25, 0xd0, 0x16, 0xf5, 0x48, 0xe7, 0xa6, 0xeb, 0x91, 0xfa, - 0x5e, 0xaa, 0xc7, 0xc5, 0xa7, 0x43, 0x9d, 0xbe, 0x61, 0xee, 0x58, 0x0e, 0xf1, 0xf6, 0xeb, 0x83, - 0xdd, 0x1e, 0x6b, 0xf0, 0xeb, 0x7d, 0x42, 0x8d, 0x2c, 0xab, 0x7a, 0x9e, 0x95, 0x37, 0x74, 0xa8, - 0xd5, 0x27, 0x29, 0x83, 0x9b, 0xc7, 0x19, 0xf8, 0xe6, 0x0e, 0xe9, 0x1b, 0x29, 0xbb, 0xa7, 0xf2, - 0xec, 0x86, 0xd4, 0xb2, 0xeb, 0x96, 0x43, 0x7d, 0xea, 0x25, 0x8d, 0xf4, 0xf7, 0x4a, 0x30, 0x79, - 0xdb, 0x20, 0x7d, 0xd7, 0x69, 0x13, 0x8a, 0xbe, 0x03, 0x55, 0x36, 0x8c, 0xae, 0x41, 0x8d, 0x05, - 0xed, 0x51, 0xed, 0xea, 0xd4, 0x8d, 0xaf, 0xd6, 0xc2, 0x69, 0x56, 0xa8, 0xb5, 0xc1, 0x6e, 0x8f, - 0x35, 0xf8, 0x35, 0xa6, 0x5d, 0xdb, 0xbb, 0x5e, 0xdb, 0xd8, 0x7a, 0x87, 0x98, 0x74, 0x9d, 0x50, - 0xa3, 0x81, 0xee, 0x1f, 0x2c, 0x9f, 0x3b, 0x3c, 0x58, 0x86, 0xb0, 0x0d, 0x2b, 0x54, 0xd4, 0x84, - 0x31, 0x7f, 0x40, 0xcc, 0x85, 0x12, 0x47, 0xbf, 0x56, 0x3b, 0x72, 0x11, 0x6b, 0xca, 0xb3, 0xf6, - 0x80, 0x98, 0x8d, 0xf3, 0x12, 0x79, 0x8c, 0x3d, 0x61, 0x8e, 0x83, 0xde, 0x80, 0x71, 0x9f, 0x1a, - 0x74, 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, - 0x8c, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, - 0x9e, 0x87, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x8f, 0x05, 0x0e, 0x75, 0xf6, 0x07, - 0xe4, 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, - 0xfd, 0x74, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, - 0x20, 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0x27, 0x8a, - 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x1e, 0x83, - 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, - 0x7a, 0x1c, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, - 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd0, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xcd, - 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, - 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, - 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xa7, 0x63, 0x11, 0xf7, 0x59, - 0x68, 0xa2, 0xb7, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 
0xa4, 0xfb, 0x4f, 0x15, 0x74, 0xdf, - 0xd8, 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x27, 0xac, 0x20, 0xd1, 0xeb, 0x50, - 0xa5, 0xa4, 0x3f, 0xb0, 0x0d, 0x4a, 0xe4, 0x3e, 0xfa, 0xff, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, - 0x96, 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, - 0x19, 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, - 0xc6, 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, - 0xe5, 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, - 0x00, 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0x57, 0x01, 0x05, 0xc3, 0x78, 0x45, 0x24, 0x37, - 0xcb, 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, - 0x8f, 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, - 0x38, 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xb3, 0x71, - 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x06, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, - 0x88, 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, - 0x79, 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, - 0xc4, 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, - 0x6e, 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x03, 0x53, 0xa2, - 0x37, 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, - 0x77, 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, - 0x89, 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, - 0xb8, 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, - 0x38, 0xa9, 0x8f, 0x5e, 0x81, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2c, - 0x41, 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x9e, 0x87, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, - 0x2b, 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, - 0x22, 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, - 0xc8, 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x34, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0xa5, - 0x58, 0x89, 0xfd, 0x4a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, - 0x2a, 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xe6, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, - 0x1e, 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xbf, 0x5f, 0x02, 0xb8, 0x4d, 0x06, 0xb6, - 0xbb, 0xdf, 0x27, 0xce, 0x59, 0x70, 0xa8, 0x8d, 0x18, 0x87, 0x7a, 0xf2, 0xb8, 0xe5, 0x51, 0xae, - 0xe5, 0x92, 0xa8, 0x37, 0x13, 0x24, 0xaa, 0x5e, 0x1c, 0xf2, 0x68, 0x16, 0xf5, 0xd7, 0x32, 0x5c, - 0x0a, 0x95, 0x43, 0x1a, 0xf5, 0x42, 0x6c, 0x8d, 0xbf, 0x9c, 0x58, 0xe3, 0xf9, 0x0c, 0x93, 0xcf, - 0x8d, 0x47, 0xbd, 0x03, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, - 0xaa, 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa8, - 0xc1, 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x78, 0xe1, 0x10, 0xcd, 0x61, - 0x6d, 0xff, 0x64, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x51, 0x18, 0x73, - 0x8c, 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0xde, 0xd3, 0x00, 0xc9, - 0x2a, 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 
0xd0, 0x63, - 0x6d, 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, - 0x00, 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xb2, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, - 0x6d, 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, - 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x44, 0x73, 0x50, 0xd9, 0x33, 0xec, 0xa1, - 0x48, 0xbf, 0x93, 0x58, 0x3c, 0x3c, 0x5f, 0x7a, 0x56, 0xd3, 0x3f, 0xa8, 0x44, 0x63, 0x87, 0x33, - 0xe6, 0xab, 0x50, 0xf5, 0xc8, 0xc0, 0xb6, 0x4c, 0xc3, 0x97, 0x44, 0x88, 0x93, 0x5f, 0x2c, 0xdb, - 0xb0, 0x92, 0xc6, 0xb8, 0x75, 0xe9, 0xf3, 0xe5, 0xd6, 0xe5, 0xd3, 0xe1, 0xd6, 0xdf, 0x86, 0xaa, - 0x1f, 0xb0, 0xea, 0x31, 0x0e, 0x79, 0x7d, 0x84, 0xfc, 0x2a, 0x09, 0xb5, 0xea, 0x40, 0x51, 0x69, - 0x05, 0x9a, 0x45, 0xa2, 0x2b, 0x23, 0x92, 0xe8, 0x53, 0x25, 0xbe, 0x2c, 0xdf, 0x0c, 0x8c, 0xa1, - 0x4f, 0xba, 0x3c, 0xb7, 0x55, 0xc3, 0x7c, 0xd3, 0xe2, 0xad, 0x58, 0x4a, 0xd1, 0xdb, 0xb1, 0x90, - 0xad, 0x9e, 0x24, 0x64, 0x67, 0xf2, 0xc3, 0x15, 0x6d, 0xc2, 0xfc, 0xc0, 0x73, 0x7b, 0x1e, 0xf1, - 0xfd, 0xdb, 0xc4, 0xe8, 0xda, 0x96, 0x43, 0x82, 0xf9, 0x11, 0x8c, 0xe8, 0xca, 0xe1, 0xc1, 0xf2, - 0x7c, 0x2b, 0x5b, 0x05, 0xe7, 0xd9, 0xea, 0xf7, 0xc7, 0xe0, 0x42, 0xb2, 0x02, 0xe6, 0x90, 0x54, - 0xed, 0x44, 0x24, 0xf5, 0x5a, 0x64, 0x33, 0x08, 0x06, 0xaf, 0x56, 0x3f, 0x63, 0x43, 0xdc, 0x82, - 0x59, 0x99, 0x0d, 0x02, 0xa1, 0xa4, 0xe9, 0x6a, 0xf5, 0x37, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0x2f, - 0xc0, 0xb4, 0xc7, 0x79, 0x77, 0x00, 0x20, 0xb8, 0xeb, 0x43, 0x12, 0x60, 0x1a, 0x47, 0x85, 0x38, - 0xae, 0xcb, 0x78, 0x6b, 0x48, 0x47, 0x03, 0x80, 0xb1, 0x38, 0x6f, 0xbd, 0x95, 0x54, 0xc0, 0x69, - 0x1b, 0xb4, 0x0e, 0x97, 0x86, 0x4e, 0x1a, 0x4a, 0x84, 0xf2, 0x15, 0x09, 0x75, 0x69, 0x33, 0xad, - 0x82, 0xb3, 0xec, 0xd0, 0x76, 0x8c, 0xca, 0x8e, 0xf3, 0xf4, 0x7c, 0xa3, 0xf0, 0xc6, 0x2b, 0xcc, - 0x65, 0x33, 0xe8, 0x76, 0xb5, 0x28, 0xdd, 0xd6, 0x7f, 0xaf, 0x45, 0x8b, 0x90, 0xa2, 0xc0, 0xc7, - 0xbd, 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, - 0x79, 0x3c, 0xfd, 0xfd, 0x83, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, - 0xa0, 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0d, - 0x55, 0xf6, 0x97, 0x39, 0xce, 0xc3, 0x75, 0x92, 0x27, 0x99, 0x6a, 0x4b, 0xb6, 0x3d, 0x88, 0xfc, - 0xc6, 0x4a, 0x13, 0x7d, 0x03, 0x26, 0xd8, 0xde, 0x26, 0x4e, 0xb7, 0x20, 0xf9, 0x95, 0x4e, 0x35, - 0x84, 0x51, 0xc8, 0x67, 0x64, 0x03, 0x0e, 0xe0, 0xf4, 0x5d, 0x98, 0x8b, 0x0c, 0x02, 0x0f, 0x6d, - 0xf2, 0x06, 0xab, 0x57, 0xa8, 0x0d, 0x15, 0xd6, 0x3b, 0xab, 0x4a, 0xe5, 0x02, 0xaf, 0x17, 0x13, - 0x13, 0x11, 0x72, 0x0f, 0xf6, 0xe4, 0x63, 0x81, 0xa5, 0x6f, 0xc0, 0xc4, 0x6a, 0xab, 0x61, 0xbb, - 0x82, 0x6f, 0x98, 0x56, 0xd7, 0x4b, 0xce, 0xd4, 0xca, 0xea, 0x6d, 0x8c, 0xb9, 0x04, 0xe9, 0x30, - 0x4e, 0xee, 0x99, 0x64, 0x40, 0x39, 0xc5, 0x98, 0x6c, 0x00, 0x4b, 0xa4, 0x77, 0x78, 0x0b, 0x96, - 0x12, 0xfd, 0xc7, 0x25, 0x98, 0x90, 0xdd, 0x9e, 0xc1, 0xf9, 0x63, 0x2d, 0x76, 0xfe, 0x78, 0xa2, - 0xd8, 0x12, 0xe4, 0x1e, 0x3e, 0x3a, 0x89, 0xc3, 0xc7, 0xb5, 0x82, 0x78, 0x47, 0x9f, 0x3c, 0xde, - 0x2d, 0xc1, 0x4c, 0x7c, 0xf1, 0xd1, 0x33, 0x30, 0xc5, 0x52, 0xad, 0x65, 0x92, 0x66, 0xc8, 0xf0, - 0xd4, 0xeb, 0x87, 0x76, 0x28, 0xc2, 0x51, 0x3d, 0xd4, 0x53, 0x66, 0x2d, 0xd7, 0xa3, 0x72, 0xd0, - 0xf9, 0x53, 0x3a, 0xa4, 0x96, 0x5d, 0x13, 0x2f, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, - 0xb3, 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x4d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, - 0x99, 0x24, 
0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, - 0x4c, 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x46, 0x83, - 0x29, 0x39, 0x17, 0x67, 0x40, 0xd4, 0x5f, 0x8b, 0x13, 0xf5, 0xc7, 0x0a, 0xee, 0xd0, 0x6c, 0x96, - 0xfe, 0x5b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, - 0xfa, 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, - 0x45, 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, - 0xb4, 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, - 0x32, 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa0, 0xc1, 0xc3, 0x19, 0xfe, 0x4b, - 0xd2, 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xae, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, - 0xb6, 0x20, 0x85, 0x05, 0xd0, 0xfa, 0x2f, 0x35, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, - 0x92, 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x6b, 0x50, 0xe5, 0x77, 0x44, 0xa6, 0x6b, - 0xcb, 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, - 0x31, 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, - 0x58, 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc2, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, - 0x18, 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, - 0xd3, 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, - 0xe8, 0x75, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0xf1, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, - 0xa3, 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2b, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0xaf, 0xab, - 0xd1, 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, - 0xb4, 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0x77, 0xa1, 0x4c, - 0xed, 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, - 0xcc, 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, - 0xc3, 0xcd, 0xc7, 0x9e, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x6a, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, - 0xf3, 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x76, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, - 0x7c, 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, - 0x2f, 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, - 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x5f, 0x94, 0x60, 0xba, 0x49, 0xe8, - 0x77, 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, - 0xf9, 0x32, 0xe6, 0x5d, 0x2e, 0x15, 0x78, 0x2b, 0x41, 0x05, 0x6e, 0x8c, 0x84, 0x7a, 0x34, 0x21, - 0xf8, 0x50, 0x83, 0xf9, 0x98, 0xfe, 0x9d, 0x30, 0xd7, 0xa8, 0xe4, 0xaf, 0x15, 0x4a, 0xfe, 0x31, - 0x18, 0x96, 0x30, 0xb3, 0x93, 0x3f, 0x5a, 0x83, 0x12, 0x75, 0xe5, 0xce, 0x18, 0x0d, 0x93, 0x10, - 0x2f, 0xac, 0x67, 0x1d, 0x17, 0x97, 0xa8, 0xab, 0xff, 0x51, 0x83, 0x85, 0x98, 0x56, 0x34, 0x5b, - 0x7e, 0x4e, 0x23, 0xc0, 0x30, 0xb6, 0xed, 0xb9, 0xfd, 0x13, 0x8f, 0x41, 0x2d, 0xf2, 0xcb, 0x9e, - 0xdb, 0xc7, 0x1c, 0x4b, 0xff, 0x48, 0x83, 0x8b, 0x31, 0xcd, 0x33, 0xe0, 0x24, 0xaf, 0xc7, 0x39, - 0xc9, 0xb5, 0x51, 0x06, 0x92, 0xc3, 0x4c, 0x3e, 0x2a, 0x25, 0x86, 0xc1, 0x06, 0x8c, 0xb6, 0x61, - 0x6a, 0xe0, 0x76, 0xdb, 0xa7, 0x70, 0xf9, 0x3b, 0xcb, 0xb8, 0x62, 0x2b, 0xc4, 0xc2, 0x51, 0x60, - 0x74, 0x0f, 0x2e, 0x32, 0xda, 0xe2, 
0x0f, 0x0c, 0x93, 0xb4, 0x4f, 0xe1, 0x75, 0xd8, 0x43, 0xfc, - 0x76, 0x29, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, 0xec, 0x22, 0x37, 0xe9, - 0xb1, 0x04, 0x4f, 0x9c, 0x74, 0x44, 0xf9, 0x90, 0x0f, 0x38, 0xc0, 0xd0, 0xff, 0x92, 0x8c, 0x06, - 0x4e, 0x85, 0x5f, 0x89, 0x50, 0x0f, 0x79, 0x0f, 0x74, 0x32, 0xda, 0xd1, 0x94, 0x2c, 0xe7, 0xa4, - 0xac, 0xbd, 0x9a, 0xe0, 0x44, 0x5f, 0x82, 0x09, 0xe2, 0x74, 0xf9, 0x41, 0x40, 0xbc, 0x64, 0xe1, - 0xa3, 0xba, 0x23, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x54, 0x4e, 0x8c, 0x8a, 0x97, 0xf0, 0x77, 0x4e, - 0x2d, 0x38, 0xd4, 0x61, 0x22, 0x37, 0x40, 0xb6, 0x42, 0x6a, 0x29, 0x62, 0xfe, 0x6b, 0xa3, 0xc4, - 0x7c, 0xb4, 0xb6, 0xe6, 0x12, 0x4b, 0xf4, 0x2d, 0x18, 0x27, 0xa2, 0x0b, 0x51, 0xb1, 0x6f, 0x8e, - 0xd2, 0x45, 0x98, 0x7e, 0xc3, 0x94, 0x2d, 0xdb, 0x24, 0x2a, 0x7a, 0x89, 0xcd, 0x17, 0xd3, 0x65, - 0x47, 0x1e, 0xc1, 0xcc, 0x27, 0x1b, 0x8f, 0x88, 0x61, 0xab, 0xe6, 0x07, 0x07, 0xcb, 0x10, 0x3e, - 0xe2, 0xa8, 0x85, 0xfe, 0x3d, 0xb8, 0x94, 0x51, 0x22, 0x90, 0x19, 0x7b, 0x33, 0x24, 0x32, 0x66, - 0xbd, 0xd8, 0x32, 0x14, 0xbf, 0xe2, 0x7c, 0xbf, 0x04, 0x20, 0xdf, 0x45, 0x9d, 0xcd, 0x97, 0x55, - 0xa3, 0xdd, 0x0a, 0x86, 0xae, 0x9d, 0xda, 0xad, 0x60, 0x04, 0xf2, 0xe8, 0x52, 0xfc, 0x8f, 0x12, - 0x5c, 0x0a, 0x95, 0x0b, 0xdf, 0x0a, 0x66, 0x98, 0xfc, 0xef, 0xeb, 0xaa, 0x62, 0x37, 0x75, 0xe1, - 0xd4, 0xfd, 0xe7, 0xdd, 0xd4, 0x85, 0xbe, 0xe5, 0x54, 0xda, 0x5f, 0x97, 0xa2, 0x03, 0x18, 0xf1, - 0xba, 0xe8, 0x14, 0x3e, 0x30, 0xfa, 0xc2, 0xdd, 0x38, 0xe9, 0x7f, 0x2e, 0xc3, 0x85, 0xe4, 0x6e, - 0x8c, 0xdd, 0x2a, 0x68, 0xc7, 0xde, 0x2a, 0xb4, 0x60, 0x6e, 0x7b, 0x68, 0xdb, 0xfb, 0x7c, 0x0c, - 0x91, 0xab, 0x05, 0x71, 0x1f, 0xf1, 0x7f, 0xd2, 0x72, 0xee, 0xe5, 0x0c, 0x1d, 0x9c, 0x69, 0x99, - 0xbe, 0x64, 0x18, 0xfb, 0x77, 0x2f, 0x19, 0x2a, 0x27, 0xb8, 0x64, 0xc8, 0xbe, 0xa7, 0x29, 0x9f, - 0xe8, 0x9e, 0xe6, 0x24, 0x37, 0x0c, 0x19, 0x49, 0xec, 0xd8, 0x52, 0xf2, 0x22, 0xcc, 0xc4, 0x6f, - 0xbd, 0xc4, 0x5a, 0x8a, 0x8b, 0x37, 0x79, 0xc7, 0x14, 0x59, 0x4b, 0xd1, 0x8e, 0x95, 0x86, 0x7e, - 0xa8, 0xc1, 0xe5, 0xec, 0xaf, 0x5b, 0x90, 0x0d, 0x33, 0x7d, 0xe3, 0x5e, 0xf4, 0x8b, 0x23, 0xed, - 0x84, 0x4c, 0x89, 0x5f, 0x77, 0xac, 0xc7, 0xb0, 0x70, 0x02, 0x1b, 0xbd, 0x05, 0xd5, 0xbe, 0x71, - 0xaf, 0x3d, 0xf4, 0x7a, 0xe4, 0xc4, 0x8c, 0x8c, 0x6f, 0xa3, 0x75, 0x89, 0x82, 0x15, 0x9e, 0xfe, - 0x99, 0x06, 0xf3, 0x39, 0x97, 0x18, 0xff, 0x45, 0xa3, 0x7c, 0xb7, 0x04, 0x95, 0xb6, 0x69, 0xd8, - 0xe4, 0x0c, 0x08, 0xc5, 0xab, 0x31, 0x42, 0x71, 0xdc, 0x57, 0xb2, 0xdc, 0xab, 0x5c, 0x2e, 0x81, - 0x13, 0x5c, 0xe2, 0x89, 0x42, 0x68, 0x47, 0xd3, 0x88, 0xe7, 0x60, 0x52, 0x75, 0x3a, 0x5a, 0x76, - 0xd3, 0x7f, 0x5e, 0x82, 0xa9, 0x48, 0x17, 0x23, 0xe6, 0xc6, 0xed, 0x58, 0x41, 0x28, 0x17, 0x78, - 0x83, 0x14, 0xe9, 0xab, 0x16, 0x94, 0x00, 0xf1, 0x95, 0x47, 0x78, 0xaf, 0x9f, 0xae, 0x0c, 0x2f, - 0xc2, 0x0c, 0x35, 0xbc, 0x1e, 0xa1, 0xea, 0xc8, 0x20, 0x5e, 0x9e, 0xaa, 0xcf, 0x8d, 0x3a, 0x31, - 0x29, 0x4e, 0x68, 0x2f, 0xbe, 0x00, 0xd3, 0xb1, 0xce, 0x46, 0xf9, 0x48, 0xa3, 0xb1, 0x72, 0xff, - 0xd3, 0xa5, 0x73, 0x1f, 0x7f, 0xba, 0x74, 0xee, 0x93, 0x4f, 0x97, 0xce, 0x7d, 0xff, 0x70, 0x49, - 0xbb, 0x7f, 0xb8, 0xa4, 0x7d, 0x7c, 0xb8, 0xa4, 0x7d, 0x72, 0xb8, 0xa4, 0xfd, 0xed, 0x70, 0x49, - 0xfb, 0xc9, 0x67, 0x4b, 0xe7, 0xde, 0x7a, 0xe4, 0xc8, 0xff, 0xd9, 0xf8, 0x57, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x39, 0x36, 0x95, 0x55, 0xec, 0x31, 0x00, 0x00, + // 2858 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 
0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3, + 0x46, 0x84, 0x4d, 0xd8, 0xcc, 0xb0, 0x9b, 0x64, 0xc9, 0x87, 0x94, 0xb0, 0xe3, 0xdd, 0x64, 0x9d, + 0xd8, 0xe3, 0x49, 0xcd, 0x38, 0x41, 0x11, 0x01, 0xda, 0x3d, 0xe5, 0x71, 0xc7, 0x3d, 0xdd, 0xa3, + 0xee, 0x1a, 0xb3, 0xbe, 0x81, 0xe0, 0x92, 0x13, 0x5c, 0x02, 0x1c, 0x91, 0x90, 0xb8, 0x72, 0xe5, + 0x10, 0x22, 0x10, 0x41, 0x5a, 0x21, 0x0e, 0x91, 0x38, 0x90, 0x93, 0x45, 0x9c, 0x13, 0xe2, 0x1f, + 0x40, 0x7b, 0x42, 0xf5, 0xd1, 0xd5, 0xdf, 0x76, 0x8f, 0xf1, 0x5a, 0x04, 0x71, 0x5a, 0x4f, 0xbd, + 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0x57, 0x77, 0x9f, 0xf7, 0x6b, + 0x96, 0x5b, 0xdf, 0x1d, 0x6e, 0x11, 0xcf, 0x21, 0x94, 0xf8, 0xf5, 0x3d, 0xe2, 0x74, 0x5d, 0xaf, + 0x2e, 0x05, 0xc6, 0xc0, 0xaa, 0x93, 0x7b, 0x94, 0x38, 0xbe, 0xe5, 0x3a, 0x7e, 0x7d, 0xef, 0xfa, + 0x16, 0xa1, 0xc6, 0xf5, 0x7a, 0x8f, 0x38, 0xc4, 0x33, 0x28, 0xe9, 0xd6, 0x06, 0x9e, 0x4b, 0x5d, + 0xf4, 0x98, 0x50, 0xaf, 0x19, 0x03, 0xab, 0x16, 0xaa, 0xd7, 0xa4, 0xfa, 0xe2, 0xd3, 0x3d, 0x8b, + 0xee, 0x0c, 0xb7, 0x6a, 0xa6, 0xdb, 0xaf, 0xf7, 0xdc, 0x9e, 0x5b, 0xe7, 0x56, 0x5b, 0xc3, 0x6d, + 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x09, 0xb4, 0x45, 0x3d, 0xd2, 0xb9, 0xe9, 0x7a, 0xa4, 0xbe, 0x97, + 0xea, 0x71, 0xf1, 0xd9, 0x50, 0xa7, 0x6f, 0x98, 0x3b, 0x96, 0x43, 0xbc, 0xfd, 0xfa, 0x60, 0xb7, + 0xc7, 0x1a, 0xfc, 0x7a, 0x9f, 0x50, 0x23, 0xcb, 0xaa, 0x9e, 0x67, 0xe5, 0x0d, 0x1d, 0x6a, 0xf5, + 0x49, 0xca, 0xe0, 0xe6, 0x71, 0x06, 0xbe, 0xb9, 0x43, 0xfa, 0x46, 0xca, 0xee, 0x99, 0x3c, 0xbb, + 0x21, 0xb5, 0xec, 0xba, 0xe5, 0x50, 0x9f, 0x7a, 0x49, 0x23, 0xfd, 0x83, 0x12, 0x4c, 0xde, 0x36, + 0x48, 0xdf, 0x75, 0xda, 0x84, 0xa2, 0xef, 0x41, 0x95, 0x0d, 0xa3, 0x6b, 0x50, 0x63, 0x41, 0x7b, + 0x5c, 0xbb, 0x3a, 0x75, 0xe3, 0xeb, 0xb5, 0x70, 0x9a, 0x15, 0x6a, 0x6d, 0xb0, 0xdb, 0x63, 0x0d, + 0x7e, 0x8d, 0x69, 0xd7, 0xf6, 0xae, 0xd7, 0x36, 0xb6, 0xde, 0x23, 0x26, 0x5d, 0x27, 0xd4, 0x68, + 0xa0, 0xfb, 0x07, 0xcb, 0xe7, 0x0e, 0x0f, 0x96, 0x21, 0x6c, 0xc3, 0x0a, 0x15, 0x35, 0x61, 0xcc, + 0x1f, 0x10, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5c, 0xc4, 0x9a, 0xf2, 0xac, 0x3d, 0x20, + 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc1, 0xb8, 0x4f, 0x0d, 0x3a, + 0xf4, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8, 0xf8, 0x8d, + 0x25, 0x9a, 0xfe, 0x8f, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, 0x5e, + 0x84, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0x75, 0xf6, 0x07, 0xe4, + 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, 0xfd, + 0x6c, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, 0x20, + 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0xa7, 0x8a, 0x2d, + 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e, 0x80, 0x71, + 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, 0x7a, + 0x12, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, 0x2e, + 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xed, 0x54, + 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, 0x18, + 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, 0xa6, + 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0x67, 0x63, 0x11, 0xf7, 0x59, 0x68, + 0xa2, 0x77, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0xcf, 
0x14, 0x74, 0xdf, 0xd8, + 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37, 0xa1, 0x4a, + 0x49, 0x7f, 0x60, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xe5, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, 0x96, + 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, 0x19, + 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, 0xc6, + 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, 0xe5, + 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, 0x00, + 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0xd7, 0x01, 0x05, 0xc3, 0x78, 0x4d, 0x24, 0x37, 0xcb, + 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, 0x8f, + 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, 0x38, + 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xf3, 0x71, 0x98, + 0x4d, 0xe4, 0x1b, 0xf4, 0x16, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, 0x88, + 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, 0x79, + 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, 0xc4, + 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, 0x6e, + 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x07, 0x53, 0xa2, 0x37, + 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, 0x77, + 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, 0x89, + 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, 0xb8, + 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, 0x38, + 0xa9, 0x8f, 0x5e, 0x83, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2a, 0x41, + 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x5e, 0x84, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, 0x2b, + 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, 0x22, + 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, 0xc8, + 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x35, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0x95, 0x58, + 0x89, 0xfd, 0x5a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, 0x2a, + 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xee, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, 0x1e, + 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xff, 0xa2, 0x04, 0x70, 0x9b, 0x0c, 0x6c, 0x77, + 0xbf, 0x4f, 0x9c, 0xb3, 0xe0, 0x50, 0x1b, 0x31, 0x0e, 0xf5, 0xf4, 0x71, 0xcb, 0xa3, 0x5c, 0xcb, + 0x25, 0x51, 0x6f, 0x27, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14, + 0x2a, 0x87, 0x34, 0xea, 0xa5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x0f, 0x8d, + 0x47, 0xbd, 0x07, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, 0xaa, + 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa4, 0xc1, + 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x64, 0xe1, 0x10, 0xcd, 0x61, 0x6d, + 0xff, 0x62, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x71, 0x18, 0x73, 0x8c, + 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0x3e, 0xd0, 0x00, 0xc9, 0x2a, + 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, 0x6d, + 
0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, 0x00, + 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xba, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d, + 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, 0x05, + 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91, + 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x62, 0xe9, 0x79, 0x4d, 0xff, 0xb0, 0x12, 0x8d, 0x1d, 0xce, 0x98, + 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3, + 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0x87, 0xcb, 0xad, 0xcb, 0xa7, 0xc3, 0xad, 0xbf, 0x0b, 0x55, 0x3f, + 0x60, 0xd5, 0x63, 0x1c, 0xf2, 0xfa, 0x08, 0xf9, 0x55, 0x12, 0x6a, 0xd5, 0x81, 0xa2, 0xd2, 0x0a, + 0x34, 0x8b, 0x44, 0x57, 0x46, 0x24, 0xd1, 0xa7, 0x4a, 0x7c, 0x59, 0xbe, 0x19, 0x18, 0x43, 0x9f, + 0x74, 0x79, 0x6e, 0xab, 0x86, 0xf9, 0xa6, 0xc5, 0x5b, 0xb1, 0x94, 0xa2, 0x77, 0x63, 0x21, 0x5b, + 0x3d, 0x49, 0xc8, 0xce, 0xe4, 0x87, 0x2b, 0xda, 0x84, 0xf9, 0x81, 0xe7, 0xf6, 0x3c, 0xe2, 0xfb, + 0xb7, 0x89, 0xd1, 0xb5, 0x2d, 0x87, 0x04, 0xf3, 0x23, 0x18, 0xd1, 0x95, 0xc3, 0x83, 0xe5, 0xf9, + 0x56, 0xb6, 0x0a, 0xce, 0xb3, 0xd5, 0xef, 0x8f, 0xc1, 0x85, 0x64, 0x05, 0xcc, 0x21, 0xa9, 0xda, + 0x89, 0x48, 0xea, 0xb5, 0xc8, 0x66, 0x10, 0x0c, 0x5e, 0xad, 0x7e, 0xc6, 0x86, 0xb8, 0x05, 0xb3, + 0x32, 0x1b, 0x04, 0x42, 0x49, 0xd3, 0xd5, 0xea, 0x6f, 0xc6, 0xc5, 0x38, 0xa9, 0x8f, 0x5e, 0x82, + 0x69, 0x8f, 0xf3, 0xee, 0x00, 0x40, 0x70, 0xd7, 0x47, 0x24, 0xc0, 0x34, 0x8e, 0x0a, 0x71, 0x5c, + 0x97, 0xf1, 0xd6, 0x90, 0x8e, 0x06, 0x00, 0x63, 0x71, 0xde, 0x7a, 0x2b, 0xa9, 0x80, 0xd3, 0x36, + 0x68, 0x1d, 0x2e, 0x0d, 0x9d, 0x34, 0x94, 0x08, 0xe5, 0x2b, 0x12, 0xea, 0xd2, 0x66, 0x5a, 0x05, + 0x67, 0xd9, 0xa1, 0xed, 0x18, 0x95, 0x1d, 0xe7, 0xe9, 0xf9, 0x46, 0xe1, 0x8d, 0x57, 0x98, 0xcb, + 0x66, 0xd0, 0xed, 0x6a, 0x51, 0xba, 0xad, 0xff, 0x41, 0x8b, 0x16, 0x21, 0x45, 0x81, 0x8f, 0xbb, + 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, 0x79, + 0x3c, 0xfd, 0xfd, 0xa3, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, 0xa0, + 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0b, 0x55, + 0xf6, 0x2f, 0x73, 0x9c, 0x87, 0xeb, 0x24, 0x4f, 0x32, 0xd5, 0x96, 0x6c, 0x7b, 0x10, 0xf9, 0x1b, + 0x2b, 0x4d, 0xf4, 0x2d, 0x98, 0x60, 0x7b, 0x9b, 0x38, 0xdd, 0x82, 0xe4, 0x57, 0x3a, 0xd5, 0x10, + 0x46, 0x21, 0x9f, 0x91, 0x0d, 0x38, 0x80, 0xd3, 0x77, 0x61, 0x2e, 0x32, 0x08, 0x3c, 0xb4, 0xc9, + 0x5b, 0xac, 0x5e, 0xa1, 0x36, 0x54, 0x58, 0xef, 0xac, 0x2a, 0x95, 0x0b, 0x5c, 0x2f, 0x26, 0x26, + 0x22, 0xe4, 0x1e, 0xec, 0x97, 0x8f, 0x05, 0x96, 0xbe, 0x01, 0x13, 0xab, 0xad, 0x86, 0xed, 0x0a, + 0xbe, 0x61, 0x5a, 0x5d, 0x2f, 0x39, 0x53, 0x2b, 0xab, 0xb7, 0x31, 0xe6, 0x12, 0xa4, 0xc3, 0x38, + 0xb9, 0x67, 0x92, 0x01, 0xe5, 0x14, 0x63, 0xb2, 0x01, 0x2c, 0x91, 0xde, 0xe1, 0x2d, 0x58, 0x4a, + 0xf4, 0x9f, 0x94, 0x60, 0x42, 0x76, 0x7b, 0x06, 0xe7, 0x8f, 0xb5, 0xd8, 0xf9, 0xe3, 0xa9, 0x62, + 0x4b, 0x90, 0x7b, 0xf8, 0xe8, 0x24, 0x0e, 0x1f, 0xd7, 0x0a, 0xe2, 0x1d, 0x7d, 0xf2, 0x78, 0xbf, + 0x04, 0x33, 0xf1, 0xc5, 0x47, 0xcf, 0xc1, 0x14, 0x4b, 0xb5, 0x96, 0x49, 0x9a, 0x21, 0xc3, 0x53, + 0xd7, 0x0f, 0xed, 0x50, 0x84, 0xa3, 0x7a, 0xa8, 0xa7, 0xcc, 0x5a, 0xae, 0x47, 0xe5, 0xa0, 0xf3, + 0xa7, 0x74, 0x48, 0x2d, 0xbb, 0x26, 0x2e, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, 0xb3, + 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x6d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, 0x99, + 0x24, 0x8b, 0xbe, 0x05, 
0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, 0x4c, + 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x56, 0x83, 0x29, + 0x39, 0x17, 0x67, 0x40, 0xd4, 0xdf, 0x88, 0x13, 0xf5, 0x27, 0x0a, 0xee, 0xd0, 0x6c, 0x96, 0xfe, + 0x3b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, 0xfa, + 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, 0x45, + 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, 0xb4, + 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, 0x32, + 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa8, 0xc1, 0xa3, 0x19, 0xfe, 0x4b, 0xd2, + 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xa1, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, 0xb6, + 0x20, 0x85, 0x05, 0xd0, 0xfa, 0xaf, 0x34, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, 0x92, + 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x1b, 0x50, 0xe5, 0x6f, 0x44, 0xa6, 0x6b, 0xcb, + 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, 0x31, + 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, 0x58, + 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc3, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, 0x18, + 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, 0xd3, + 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, 0xe8, + 0x4d, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0x71, 0xd9, 0x7f, 0x4c, 0x91, 0x08, 0x5d, 0xa8, 0xf2, 0xd1, + 0x75, 0x3a, 0x2d, 0xcc, 0xa1, 0xf4, 0xdf, 0x97, 0xd4, 0x7c, 0xf0, 0x13, 0xd2, 0x37, 0xd5, 0x68, + 0x57, 0x6c, 0xc3, 0xf7, 0x79, 0x0a, 0x13, 0xa7, 0xf9, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, + 0xa8, 0x13, 0x16, 0x4f, 0xed, 0x24, 0xc5, 0x73, 0x2a, 0xab, 0x70, 0xa2, 0xbb, 0x50, 0xa6, 0x76, + 0xd1, 0x53, 0xb9, 0x44, 0xec, 0xac, 0xb5, 0x1b, 0x53, 0x72, 0xca, 0xcb, 0x9d, 0xb5, 0x36, 0x66, + 0x10, 0x68, 0x03, 0x2a, 0xde, 0xd0, 0x26, 0xac, 0x0e, 0x94, 0x8b, 0xd7, 0x15, 0x36, 0x83, 0xe1, + 0xe6, 0x63, 0xbf, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x69, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, 0xf3, + 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x7e, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, 0x7c, + 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, 0x2f, + 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, 0x4c, + 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xdd, 0x24, 0xf4, 0xfb, + 0xae, 0xb7, 0xdb, 0x72, 0x6d, 0xcb, 0xdc, 0x3f, 0x03, 0x12, 0x80, 0x63, 0x24, 0xe0, 0xb8, 0x7c, + 0x19, 0xf3, 0x2e, 0x8f, 0x0a, 0xe8, 0x1f, 0x69, 0x30, 0x1f, 0xd3, 0xbc, 0x13, 0xe6, 0x03, 0x95, + 0xa0, 0xb5, 0x42, 0x09, 0x3a, 0x06, 0xc3, 0x92, 0x5a, 0x76, 0x82, 0x46, 0x6b, 0x50, 0xa2, 0xae, + 0x8c, 0xde, 0xd1, 0x30, 0x09, 0xf1, 0xc2, 0x9a, 0xd3, 0x71, 0x71, 0x89, 0xba, 0x6c, 0x21, 0x16, + 0x62, 0x5a, 0xd1, 0x8c, 0xf6, 0x90, 0x46, 0x80, 0x61, 0x6c, 0xdb, 0x73, 0xfb, 0x27, 0x1e, 0x83, + 0x5a, 0x88, 0x57, 0x3d, 0xb7, 0x8f, 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, + 0x1b, 0xde, 0x8c, 0xf3, 0x86, 0x6b, 0xa3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, + 0x0d, 0x18, 0x6d, 0xc3, 0xd4, 0xc0, 0xed, 0xb6, 0x4f, 0xe1, 0x81, 0x76, 0x96, 0xf1, 0xb9, 0x56, + 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x1e, 0x5c, 0x64, 0xd4, 0xc2, 0x1f, 0x18, 0x26, 0x69, 0x9f, 0xc2, + 0x95, 0xd5, 0x23, 0xfc, 0x05, 0x28, 0x89, 0x88, 
0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, + 0x7c, 0x21, 0x89, 0xe4, 0xb1, 0x24, 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, + 0xff, 0x35, 0x19, 0x0d, 0x9c, 0xae, 0xbe, 0x16, 0xa1, 0x07, 0xf2, 0xad, 0xe6, 0x64, 0xd4, 0xa0, + 0x29, 0x99, 0xc8, 0x49, 0x99, 0x75, 0x35, 0xc1, 0x5b, 0xbe, 0x02, 0x13, 0xc4, 0xe9, 0x72, 0xb2, + 0x2e, 0x2e, 0x42, 0xf8, 0xa8, 0xee, 0x88, 0x26, 0x1c, 0xc8, 0xf4, 0x1f, 0x97, 0x13, 0xa3, 0xe2, + 0x65, 0xf6, 0xbd, 0x53, 0x0b, 0x0e, 0x45, 0xf8, 0x73, 0x03, 0x64, 0x2b, 0xa4, 0x7f, 0x22, 0xe6, + 0xbf, 0x31, 0x4a, 0xcc, 0x47, 0xeb, 0x5f, 0x2e, 0xf9, 0x43, 0xdf, 0x81, 0x71, 0x22, 0xba, 0x10, + 0x55, 0xf5, 0xe6, 0x28, 0x5d, 0x84, 0xe9, 0x37, 0x3c, 0x67, 0xc9, 0x36, 0x89, 0x8a, 0x5e, 0x61, + 0xf3, 0xc5, 0x74, 0xd9, 0xb1, 0x44, 0xb0, 0xe7, 0xc9, 0xc6, 0x63, 0x62, 0xd8, 0xaa, 0xf9, 0xc1, + 0xc1, 0x32, 0x84, 0x3f, 0x71, 0xd4, 0x82, 0xbf, 0x9e, 0xc9, 0x3b, 0x9b, 0xb3, 0xf9, 0x02, 0x69, + 0xb4, 0xd7, 0xb3, 0xd0, 0xb5, 0x53, 0x7b, 0x3d, 0x8b, 0x40, 0x1e, 0x7d, 0x86, 0xfd, 0x67, 0x09, + 0x2e, 0x85, 0xca, 0x85, 0x5f, 0xcf, 0x32, 0x4c, 0xfe, 0xff, 0x15, 0x52, 0xb1, 0x17, 0xad, 0x70, + 0xea, 0xfe, 0xfb, 0x5e, 0xb4, 0x42, 0xdf, 0x72, 0xaa, 0xdd, 0x6f, 0x4a, 0xd1, 0x01, 0x8c, 0xf8, + 0xac, 0x72, 0x0a, 0x1f, 0xe2, 0x7c, 0xe1, 0x5e, 0x66, 0xf4, 0xbf, 0x94, 0xe1, 0x42, 0x72, 0x37, + 0xc6, 0x6e, 0xdf, 0xb5, 0x63, 0x6f, 0xdf, 0x5b, 0x30, 0xb7, 0x3d, 0xb4, 0xed, 0x7d, 0x3e, 0x86, + 0xc8, 0x15, 0xbc, 0xb8, 0xb7, 0xff, 0x92, 0xb4, 0x9c, 0x7b, 0x35, 0x43, 0x07, 0x67, 0x5a, 0xa6, + 0x2f, 0xe3, 0xc7, 0xfe, 0xd3, 0xcb, 0xf8, 0xca, 0x09, 0x2e, 0xe3, 0xb3, 0xdf, 0x33, 0xca, 0x27, + 0x7a, 0xcf, 0x38, 0xc9, 0x4d, 0x7c, 0x46, 0x12, 0x3b, 0xf6, 0xab, 0x92, 0x97, 0x61, 0x26, 0xfe, + 0x3a, 0x24, 0xd6, 0x52, 0x3c, 0x50, 0xc9, 0xb7, 0x98, 0xc8, 0x5a, 0x8a, 0x76, 0xac, 0x34, 0xf4, + 0x43, 0x0d, 0x2e, 0x67, 0x7f, 0x05, 0x82, 0x6c, 0x98, 0xe9, 0x1b, 0xf7, 0xa2, 0x5f, 0xe6, 0x68, + 0x27, 0x64, 0x2b, 0xfc, 0x59, 0x60, 0x3d, 0x86, 0x85, 0x13, 0xd8, 0xe8, 0x1d, 0xa8, 0xf6, 0x8d, + 0x7b, 0xed, 0xa1, 0xd7, 0x23, 0x27, 0x66, 0x45, 0x7c, 0x1b, 0xad, 0x4b, 0x14, 0xac, 0xf0, 0xf4, + 0xcf, 0x35, 0x98, 0xcf, 0xb9, 0xec, 0xff, 0x1f, 0x1a, 0xe5, 0xfb, 0x25, 0xa8, 0xb4, 0x4d, 0xc3, + 0x26, 0x67, 0x40, 0x28, 0x5e, 0x8f, 0x11, 0x8a, 0xe3, 0xbe, 0x26, 0xe5, 0x5e, 0xe5, 0x72, 0x09, + 0x9c, 0xe0, 0x12, 0x4f, 0x15, 0x42, 0x3b, 0x9a, 0x46, 0xbc, 0x00, 0x93, 0xaa, 0xd3, 0xd1, 0xb2, + 0x9b, 0xfe, 0xcb, 0x12, 0x4c, 0x45, 0xba, 0x18, 0x31, 0x37, 0x6e, 0xc7, 0x0a, 0x42, 0xb9, 0xc0, + 0x4d, 0x4b, 0xa4, 0xaf, 0x5a, 0x50, 0x02, 0xc4, 0xd7, 0x10, 0xe1, 0xfb, 0x77, 0xba, 0x32, 0xbc, + 0x0c, 0x33, 0xd4, 0xf0, 0x7a, 0x84, 0x2a, 0xda, 0x2e, 0x2e, 0x19, 0xd5, 0x67, 0x39, 0x9d, 0x98, + 0x14, 0x27, 0xb4, 0x17, 0x5f, 0x82, 0xe9, 0x58, 0x67, 0xa3, 0x7c, 0xcc, 0xd0, 0x58, 0xb9, 0xff, + 0xd9, 0xd2, 0xb9, 0x4f, 0x3e, 0x5b, 0x3a, 0xf7, 0xe9, 0x67, 0x4b, 0xe7, 0x7e, 0x70, 0xb8, 0xa4, + 0xdd, 0x3f, 0x5c, 0xd2, 0x3e, 0x39, 0x5c, 0xd2, 0x3e, 0x3d, 0x5c, 0xd2, 0xfe, 0x7e, 0xb8, 0xa4, + 0xfd, 0xf4, 0xf3, 0xa5, 0x73, 0xef, 0x3c, 0x76, 0xe4, 0xff, 0x6d, 0xf8, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf3, 0x1c, 0xa0, 0x16, 0x14, 0x31, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2944,16 +2913,6 @@ func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -3302,43 +3261,6 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4362,8 +4284,6 @@ func (m *NetworkPolicy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4496,21 +4416,6 @@ func (m *NetworkPolicySpec) Size() (n int) { return n } -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 @@ -5098,7 +5003,6 @@ func (this *NetworkPolicy) String() string { s := strings.Join([]string{`&NetworkPolicy{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5208,21 +5112,6 @@ func (this *NetworkPolicySpec) String() string { }, "") return s } -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} func (this *ReplicaSet) String() string { if this == nil { return "nil" @@ -9627,39 +9516,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10514,90 +10370,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 3ab6a093b559..3f2549681ecb 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -646,11 +646,6 @@ message NetworkPolicy { // Specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - - // Status is the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional NetworkPolicyStatus status = 3; } // DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. @@ -798,18 +793,6 @@ message NetworkPolicySpec { repeated string policyTypes = 4; } -// NetworkPolicyStatus describe the current state of the NetworkPolicy. -message NetworkPolicyStatus { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. 
- // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go index c0ac6fa25dd9..70b349f654b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -1041,10 +1041,10 @@ type NetworkPolicy struct { // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! + // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. @@ -1207,48 +1207,6 @@ type NetworkPolicyPeer struct { IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } -// NetworkPolicyConditionType is the type for status conditions on -// a NetworkPolicy. This type should be used with the -// NetworkPolicyStatus.Conditions field. -type NetworkPolicyConditionType string - -const ( - // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by - // the Network Policy provider and will be implemented in the cluster - NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted" - - // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially - // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some - // other condition - NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure" - - // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the - // Network Policy provider and will not be implemented in the cluster - NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure" -) - -// NetworkPolicyConditionReason defines the set of reasons that explain why a -// particular NetworkPolicy condition type has been raised. -type NetworkPolicyConditionReason string - -const ( - // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been - // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider - NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" -) - -// NetworkPolicyStatus describe the current state of the NetworkPolicy. 
-type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 39aaf485377b..408022c9d84e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -338,7 +338,6 @@ var map_NetworkPolicy = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -409,15 +408,6 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { return map_NetworkPolicySpec } -var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", -} - -func (NetworkPolicyStatus) SwaggerDoc() map[string]string { - return map_NetworkPolicyStatus -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index b6e92729928c..6b474ae483e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -725,7 +725,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) return } @@ -938,29 +937,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus. -func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { - if in == nil { - return nil - } - out := new(NetworkPolicyStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go index e9566d57e277..daeaea5dce7c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -776,38 +776,10 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } -func (*NetworkPolicyStatus) ProtoMessage() {} -func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{26} -} -func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyStatus.Merge(m, src) -} -func (m *NetworkPolicyStatus) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo - func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} } func (*ServiceBackendPort) ProtoMessage() {} func (*ServiceBackendPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{27} + return fileDescriptor_1c72867a70a7cc90, []int{26} } func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +831,6 @@ func init() { proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer") proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") - proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.networking.v1.NetworkPolicyStatus") proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort") } @@ -868,115 +839,112 @@ func init() { } var fileDescriptor_1c72867a70a7cc90 = []byte{ - // 1715 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x47, - 0x12, 0xd6, 0x50, 0xa2, 0x48, 0x35, 0x25, 0x59, 0x6a, 0xdb, 0x58, 0xae, 0x16, 0x4b, 0x6a, 0x07, - 0x6b, 0x5b, 0xbb, 0xb6, 0xc9, 0xb5, 0x6c, 0x2c, 0x76, 0x2f, 0x49, 0x3c, 0xb2, 0x2c, 0x2b, 0x96, - 0x29, 0xa2, 0xc9, 0x38, 0x48, 0x90, 0x87, 0x47, 0xc3, 0x16, 0x35, 0xe6, 0x70, 0x7a, 0xd0, 0xd3, - 0x54, 0xac, 0x20, 0x08, 0x72, 0xc9, 0x21, 
0xb7, 0xdc, 0x72, 0x0e, 0xf2, 0x0b, 0x82, 0xe4, 0x10, - 0x20, 0x48, 0x8c, 0x5c, 0x02, 0x1f, 0x0d, 0xe4, 0xe2, 0x4b, 0x88, 0x98, 0xf9, 0x17, 0x3a, 0x05, - 0xfd, 0x98, 0x17, 0x1f, 0x22, 0x63, 0x18, 0x3a, 0x49, 0x5d, 0x55, 0xfd, 0x75, 0xbd, 0xab, 0x86, - 0xe0, 0x66, 0xeb, 0x7f, 0x7e, 0xc9, 0x26, 0xe5, 0x56, 0x67, 0x0f, 0x53, 0x17, 0x33, 0xec, 0x97, - 0x0f, 0xb1, 0xdb, 0x20, 0xb4, 0xac, 0x18, 0xa6, 0x67, 0x97, 0x5d, 0xcc, 0x3e, 0x20, 0xb4, 0x65, - 0xbb, 0xcd, 0xf2, 0xe1, 0xb5, 0x72, 0x13, 0xbb, 0x98, 0x9a, 0x0c, 0x37, 0x4a, 0x1e, 0x25, 0x8c, - 0xc0, 0xbc, 0x94, 0x2c, 0x99, 0x9e, 0x5d, 0x8a, 0x24, 0x4b, 0x87, 0xd7, 0x56, 0xae, 0x36, 0x6d, - 0x76, 0xd0, 0xd9, 0x2b, 0x59, 0xa4, 0x5d, 0x6e, 0x92, 0x26, 0x29, 0x8b, 0x0b, 0x7b, 0x9d, 0x7d, - 0x71, 0x12, 0x07, 0xf1, 0x9f, 0x04, 0x5a, 0xd1, 0x63, 0x4f, 0x5a, 0x84, 0xe2, 0x21, 0x8f, 0xad, - 0xdc, 0x88, 0x64, 0xda, 0xa6, 0x75, 0x60, 0xbb, 0x98, 0x1e, 0x95, 0xbd, 0x56, 0x93, 0x13, 0xfc, - 0x72, 0x1b, 0x33, 0x73, 0xd8, 0xad, 0xf2, 0xa8, 0x5b, 0xb4, 0xe3, 0x32, 0xbb, 0x8d, 0x07, 0x2e, - 0xfc, 0x77, 0xdc, 0x05, 0xdf, 0x3a, 0xc0, 0x6d, 0x73, 0xe0, 0xde, 0xf5, 0x51, 0xf7, 0x3a, 0xcc, - 0x76, 0xca, 0xb6, 0xcb, 0x7c, 0x46, 0xfb, 0x2f, 0xe9, 0x3f, 0x6a, 0xe0, 0xcc, 0x9d, 0x7a, 0xbd, - 0xba, 0xed, 0x36, 0x29, 0xf6, 0xfd, 0xaa, 0xc9, 0x0e, 0xe0, 0x2a, 0x98, 0xf1, 0x4c, 0x76, 0x90, - 0xd7, 0x56, 0xb5, 0xb5, 0x39, 0x63, 0xfe, 0x49, 0xb7, 0x38, 0xd5, 0xeb, 0x16, 0x67, 0x38, 0x0f, - 0x09, 0x0e, 0xbc, 0x01, 0xb2, 0xfc, 0x6f, 0xfd, 0xc8, 0xc3, 0xf9, 0x69, 0x21, 0x95, 0xef, 0x75, - 0x8b, 0xd9, 0xaa, 0xa2, 0x1d, 0xc7, 0xfe, 0x47, 0xa1, 0x24, 0xac, 0x81, 0xcc, 0x9e, 0x69, 0xb5, - 0xb0, 0xdb, 0xc8, 0xa7, 0x56, 0xb5, 0xb5, 0xdc, 0xfa, 0x5a, 0x69, 0x54, 0xf8, 0x4a, 0x4a, 0x1f, - 0x43, 0xca, 0x1b, 0x67, 0x94, 0x12, 0x19, 0x45, 0x40, 0x01, 0x92, 0xbe, 0x0f, 0xce, 0xc5, 0xf4, - 0x47, 0x1d, 0x07, 0xdf, 0x37, 0x9d, 0x0e, 0x86, 0x15, 0x90, 0xe6, 0x0f, 0xfb, 0x79, 0x6d, 0x75, - 0x7a, 0x2d, 0xb7, 0xfe, 0xaf, 0xd1, 0x4f, 0xf5, 0x99, 0x6f, 0x2c, 0xa8, 0xb7, 0xd2, 0xfc, 0xe4, - 0x23, 0x09, 0xa3, 0xef, 0x82, 0xcc, 0x76, 0xd5, 0x70, 0x88, 0xd5, 0xe2, 0xfe, 0xb1, 0xec, 0x06, - 0xed, 0xf7, 0xcf, 0xc6, 0xf6, 0x2d, 0x84, 0x04, 0x07, 0xea, 0x60, 0x16, 0x3f, 0xb2, 0xb0, 0xc7, - 0xf2, 0xa9, 0xd5, 0xe9, 0xb5, 0x39, 0x03, 0xf4, 0xba, 0xc5, 0xd9, 0x4d, 0x41, 0x41, 0x8a, 0xa3, - 0x7f, 0x9a, 0x02, 0x19, 0xf5, 0x2c, 0x7c, 0x00, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a, - 0x6e, 0xfd, 0x3f, 0x31, 0x7d, 0xc3, 0x68, 0x96, 0xbc, 0x56, 0x93, 0x13, 0xfc, 0x12, 0x97, 0xe6, - 0xba, 0xef, 0xee, 0x3d, 0xc4, 0x16, 0xbb, 0x87, 0x99, 0x69, 0x40, 0xa5, 0x07, 0x88, 0x68, 0x28, - 0x44, 0x85, 0x5b, 0x60, 0xc6, 0xf7, 0xb0, 0xa5, 0x1c, 0x7f, 0x61, 0xac, 0xe3, 0x6b, 0x1e, 0xb6, - 0x22, 0xd3, 0xf8, 0x09, 0x09, 0x00, 0xb8, 0x0b, 0x66, 0x7d, 0x66, 0xb2, 0x8e, 0x2f, 0x02, 0x9f, - 0x5b, 0xbf, 0x34, 0x1e, 0x4a, 0x88, 0x1b, 0x8b, 0x0a, 0x6c, 0x56, 0x9e, 0x91, 0x82, 0xd1, 0x7f, - 0xd2, 0xc0, 0x62, 0x32, 0xda, 0xf0, 0x3e, 0xc8, 0xf8, 0x98, 0x1e, 0xda, 0x16, 0xce, 0xcf, 0x88, - 0x47, 0xca, 0xe3, 0x1f, 0x91, 0xf2, 0x41, 0xbe, 0xe4, 0x78, 0xae, 0x28, 0x1a, 0x0a, 0xc0, 0xe0, - 0x9b, 0x20, 0x4b, 0xb1, 0x4f, 0x3a, 0xd4, 0xc2, 0x4a, 0xfb, 0xab, 0x71, 0x60, 0x5e, 0xf7, 0x1c, - 0x92, 0x27, 0x6b, 0x63, 0x87, 0x58, 0xa6, 0x23, 0x5d, 0x89, 0xf0, 0x3e, 0xa6, 0xd8, 0xb5, 0xb0, - 0x31, 0xcf, 0xb3, 0x1c, 0x29, 0x08, 0x14, 0x82, 0xf1, 0x2a, 0x9a, 0x57, 0x8a, 0x6c, 0x38, 0xe6, - 0xa9, 0x04, 0x74, 0x27, 0x11, 0xd0, 0x7f, 0x8f, 0x75, 0x90, 0xd0, 0x6b, 0x54, 0x54, 0xf5, 0x1f, - 0x34, 0xb0, 0x14, 0x17, 0xdc, 0xb1, 0x7d, 0x06, 0xdf, 0x19, 0x30, 
0xa2, 0x34, 0x99, 0x11, 0xfc, - 0xb6, 0x30, 0x61, 0x49, 0x3d, 0x95, 0x0d, 0x28, 0x31, 0x03, 0xee, 0x82, 0xb4, 0xcd, 0x70, 0xdb, - 0x17, 0x25, 0x92, 0x5b, 0xbf, 0x38, 0x99, 0x05, 0x51, 0x75, 0x6e, 0xf3, 0xcb, 0x48, 0x62, 0xe8, - 0xbf, 0x6a, 0xa0, 0x18, 0x17, 0xab, 0x9a, 0xd4, 0x6c, 0x63, 0x86, 0xa9, 0x1f, 0x06, 0x0f, 0xae, - 0x81, 0xac, 0x59, 0xdd, 0xde, 0xa2, 0xa4, 0xe3, 0x05, 0xa5, 0xcb, 0x55, 0xbb, 0xa9, 0x68, 0x28, - 0xe4, 0xf2, 0x02, 0x6f, 0xd9, 0xaa, 0x4b, 0xc5, 0x0a, 0xfc, 0xae, 0xed, 0x36, 0x90, 0xe0, 0x70, - 0x09, 0xd7, 0x6c, 0x07, 0xcd, 0x2f, 0x94, 0xa8, 0x98, 0x6d, 0x8c, 0x04, 0x07, 0x16, 0x41, 0xda, - 0xb7, 0x88, 0x27, 0x33, 0x78, 0xce, 0x98, 0xe3, 0x2a, 0xd7, 0x38, 0x01, 0x49, 0x3a, 0xbc, 0x0c, - 0xe6, 0xb8, 0xa0, 0xef, 0x99, 0x16, 0xce, 0xa7, 0x85, 0xd0, 0x42, 0xaf, 0x5b, 0x9c, 0xab, 0x04, - 0x44, 0x14, 0xf1, 0xf5, 0xaf, 0xfb, 0xe2, 0xc3, 0x43, 0x07, 0xd7, 0x01, 0xb0, 0x88, 0xcb, 0x28, - 0x71, 0x1c, 0x1c, 0x74, 0xa3, 0x30, 0x69, 0x36, 0x42, 0x0e, 0x8a, 0x49, 0x41, 0x1b, 0x00, 0x2f, - 0xf4, 0x8d, 0x4a, 0x9e, 0xff, 0x4f, 0xe6, 0xfa, 0x21, 0x3e, 0x35, 0x16, 0xf9, 0x53, 0x31, 0x46, - 0x0c, 0x5c, 0xff, 0x46, 0x03, 0x39, 0x75, 0xff, 0x14, 0xd2, 0xe9, 0x76, 0x32, 0x9d, 0xfe, 0x31, - 0x7e, 0xb4, 0x0c, 0xcf, 0xa4, 0xef, 0x34, 0xb0, 0x12, 0x68, 0x4d, 0xcc, 0x86, 0x61, 0x3a, 0xa6, - 0x6b, 0x61, 0x1a, 0x74, 0xea, 0x15, 0x90, 0xb2, 0x83, 0xf4, 0x01, 0x0a, 0x20, 0xb5, 0x5d, 0x45, - 0x29, 0xdb, 0x83, 0x57, 0x40, 0xf6, 0x80, 0xf8, 0x4c, 0x24, 0x86, 0x4c, 0x9d, 0x50, 0xe1, 0x3b, - 0x8a, 0x8e, 0x42, 0x09, 0x58, 0x05, 0x69, 0x8f, 0x50, 0xe6, 0xe7, 0x67, 0x84, 0xc2, 0x97, 0xc7, - 0x2a, 0x5c, 0x25, 0x94, 0xa9, 0x5e, 0x1a, 0x8d, 0x28, 0x8e, 0x80, 0x24, 0x90, 0xfe, 0x11, 0xf8, - 0xeb, 0x10, 0xcd, 0xe5, 0x15, 0xf8, 0x3e, 0xc8, 0xd8, 0x92, 0xa9, 0x26, 0xe2, 0x8d, 0xb1, 0x0f, - 0x0e, 0xb1, 0x3f, 0x1a, 0xc4, 0xc1, 0xc0, 0x0d, 0x50, 0xf5, 0xaf, 0x34, 0xb0, 0x3c, 0xa0, 0xa9, - 0xd8, 0x25, 0x08, 0x65, 0xc2, 0x63, 0xe9, 0xd8, 0x2e, 0x41, 0x28, 0x43, 0x82, 0x03, 0xef, 0x82, - 0xac, 0x58, 0x45, 0x2c, 0xe2, 0x28, 0xaf, 0x95, 0x03, 0xaf, 0x55, 0x15, 0xfd, 0xb8, 0x5b, 0xfc, - 0xdb, 0xe0, 0x7e, 0x56, 0x0a, 0xd8, 0x28, 0x04, 0xe0, 0x55, 0x87, 0x29, 0x25, 0x54, 0x15, 0xa6, - 0xa8, 0xba, 0x4d, 0x4e, 0x40, 0x92, 0xae, 0x7f, 0x19, 0x25, 0x25, 0xdf, 0x15, 0xb8, 0x7e, 0x3c, - 0x22, 0xfd, 0xb3, 0x9c, 0xc7, 0x0b, 0x09, 0x0e, 0xf4, 0xc0, 0x92, 0xdd, 0xb7, 0x5c, 0x4c, 0xdc, - 0x74, 0xc3, 0x1b, 0x46, 0x5e, 0x21, 0x2f, 0xf5, 0x73, 0xd0, 0x00, 0xba, 0xfe, 0x00, 0x0c, 0x48, - 0xf1, 0x76, 0x7f, 0xc0, 0x98, 0x37, 0xa4, 0x70, 0x46, 0x6f, 0x33, 0xd1, 0xeb, 0x59, 0x61, 0x53, - 0xbd, 0x5e, 0x45, 0x02, 0x45, 0xff, 0x4c, 0x03, 0xe7, 0x87, 0x0e, 0xce, 0xb0, 0xb1, 0x69, 0x23, - 0x1b, 0x5b, 0x45, 0x45, 0x54, 0xfa, 0xe0, 0xca, 0x68, 0x4d, 0x92, 0xc8, 0x3c, 0xe2, 0xc3, 0xe2, - 0xaf, 0xff, 0x9c, 0x0a, 0x23, 0x22, 0xba, 0xda, 0x6b, 0xa1, 0xbf, 0x45, 0xd7, 0xe1, 0x2f, 0xab, - 0x1e, 0x7a, 0x2e, 0xe6, 0xbf, 0x90, 0x87, 0x06, 0xa4, 0x61, 0x03, 0x2c, 0x36, 0xf0, 0xbe, 0xd9, - 0x71, 0x98, 0x7a, 0x5b, 0x79, 0x6d, 0xf2, 0x75, 0x13, 0xf6, 0xba, 0xc5, 0xc5, 0x5b, 0x09, 0x0c, - 0xd4, 0x87, 0x09, 0x37, 0xc0, 0x34, 0x73, 0x82, 0x76, 0xf3, 0xcf, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a, - 0x91, 0x53, 0xe6, 0x4f, 0xd7, 0x77, 0x6a, 0x88, 0xdf, 0x86, 0xaf, 0x83, 0x34, 0xed, 0x38, 0x98, - 0x2f, 0x53, 0xd3, 0x13, 0xed, 0x65, 0x3c, 0xa6, 0x51, 0xf9, 0xf3, 0x93, 0x8f, 0x24, 0x84, 0xfe, - 0x31, 0x58, 0x48, 0x6c, 0x5c, 0xb0, 0x0d, 0xe6, 0x9d, 0x58, 0x09, 0x2b, 0x2f, 0x5c, 0xff, 0x53, - 0x75, 0xaf, 0x1a, 0xce, 0x39, 0xf5, 0xe2, 0x7c, 0x9c, 0x87, 0x12, 0xf0, 0xba, 0x09, 0x40, 
0x64, - 0x2b, 0xaf, 0x44, 0x5e, 0x3e, 0xb2, 0xdb, 0xa8, 0x4a, 0xe4, 0x55, 0xe5, 0x23, 0x49, 0xe7, 0xd3, - 0xcb, 0xc7, 0x16, 0xc5, 0xac, 0x12, 0xf5, 0xcb, 0x70, 0x7a, 0xd5, 0x42, 0x0e, 0x8a, 0x49, 0xe9, - 0x5f, 0xa4, 0xc0, 0x42, 0x45, 0xaa, 0x5c, 0x25, 0x8e, 0x6d, 0x1d, 0x9d, 0xc2, 0xa2, 0x75, 0x2f, - 0xb1, 0x68, 0x9d, 0xd0, 0xa6, 0x13, 0x8a, 0x8d, 0xdc, 0x9f, 0xdf, 0xe8, 0xdb, 0x9f, 0xaf, 0x4e, - 0x0a, 0x78, 0xf2, 0x16, 0xfd, 0xad, 0x06, 0xfe, 0x92, 0x90, 0xdf, 0x8c, 0x7a, 0x5c, 0x38, 0x69, - 0xb4, 0x71, 0x93, 0x26, 0x81, 0x20, 0x2a, 0x76, 0xe8, 0xa4, 0x81, 0x5b, 0x20, 0xc5, 0x88, 0x4a, - 0xfd, 0x89, 0xe1, 0x30, 0xa6, 0xd1, 0xc8, 0xac, 0x13, 0x94, 0x62, 0x44, 0xff, 0x5e, 0x03, 0xf9, - 0x84, 0x54, 0xbc, 0x37, 0xbf, 0x7c, 0xbd, 0xef, 0x81, 0x99, 0x7d, 0x4a, 0xda, 0x2f, 0xa2, 0x79, - 0x18, 0xcb, 0xdb, 0x94, 0xb4, 0x91, 0x80, 0xd1, 0x1f, 0x6b, 0x60, 0x39, 0x21, 0x79, 0x0a, 0x7b, - 0xce, 0x4e, 0x72, 0xcf, 0xb9, 0x34, 0xa1, 0x0d, 0x23, 0xb6, 0x9d, 0xc7, 0xa9, 0x3e, 0x0b, 0xb8, - 0xad, 0x70, 0x1f, 0xe4, 0x3c, 0xd2, 0xa8, 0x61, 0x07, 0x5b, 0x8c, 0x0c, 0xeb, 0x1b, 0x27, 0x19, - 0x61, 0xee, 0x61, 0x27, 0xb8, 0x6a, 0x9c, 0xe9, 0x75, 0x8b, 0xb9, 0x6a, 0x84, 0x85, 0xe2, 0xc0, - 0xf0, 0x11, 0x58, 0x0e, 0x57, 0xdc, 0xf0, 0xb5, 0xd4, 0x8b, 0xbf, 0x76, 0xbe, 0xd7, 0x2d, 0x2e, - 0x57, 0xfa, 0x11, 0xd1, 0xe0, 0x23, 0xf0, 0x0e, 0xc8, 0xd8, 0x9e, 0xf8, 0x9a, 0x57, 0x65, 0x78, - 0xd2, 0xbe, 0x28, 0x3f, 0xfb, 0xe5, 0x37, 0xa5, 0x3a, 0xa0, 0xe0, 0xba, 0xfe, 0x4b, 0x7f, 0x0e, - 0xf0, 0x84, 0x83, 0x5b, 0xb1, 0xa5, 0x46, 0x8e, 0xd2, 0xcb, 0x2f, 0xb6, 0xd0, 0x24, 0xa7, 0xed, - 0xe8, 0xde, 0xd6, 0x61, 0xb6, 0x53, 0x92, 0xbf, 0xf1, 0x94, 0xb6, 0x5d, 0xb6, 0x4b, 0x6b, 0x8c, - 0xda, 0x6e, 0x53, 0x4e, 0xfe, 0xd8, 0xb6, 0x75, 0x01, 0x64, 0xd4, 0x30, 0x16, 0x86, 0xa7, 0xa5, - 0x55, 0x9b, 0x92, 0x84, 0x02, 0x9e, 0x7e, 0xdc, 0x9f, 0x17, 0x62, 0x34, 0x3f, 0x7c, 0x69, 0x79, - 0x71, 0x56, 0x65, 0xe3, 0xe8, 0xdc, 0x78, 0x37, 0xda, 0x57, 0x65, 0xa6, 0xaf, 0x4f, 0x98, 0xe9, - 0xf1, 0x41, 0x39, 0x72, 0x5b, 0x85, 0x6f, 0x81, 0x59, 0x2c, 0xd1, 0xe5, 0xe4, 0xbd, 0x36, 0x21, - 0x7a, 0xd4, 0x56, 0xa3, 0x56, 0xac, 0x68, 0x0a, 0x10, 0xbe, 0xca, 0xbd, 0xc4, 0x65, 0xeb, 0x47, - 0x1e, 0x96, 0xeb, 0xfd, 0x9c, 0xf1, 0x77, 0x69, 0x6c, 0x48, 0x3e, 0xe6, 0xdf, 0x4d, 0xe1, 0x11, - 0xc5, 0x6f, 0xe8, 0x1f, 0x82, 0xb3, 0x43, 0x5a, 0x3f, 0xb4, 0xc4, 0xe7, 0x5e, 0xc3, 0x66, 0x36, - 0x71, 0x83, 0x9e, 0x58, 0x9e, 0xcc, 0xf9, 0x1b, 0xc1, 0xbd, 0xc4, 0xf7, 0xa1, 0x82, 0x42, 0x31, - 0x58, 0xfd, 0x3d, 0x00, 0x07, 0xf7, 0xb6, 0x09, 0xb6, 0xc2, 0x8b, 0x60, 0xd6, 0xed, 0xb4, 0xf7, - 0xb0, 0xac, 0xdf, 0x74, 0xe4, 0x9c, 0x8a, 0xa0, 0x22, 0xc5, 0x35, 0x5e, 0x79, 0xf2, 0xbc, 0x30, - 0xf5, 0xf4, 0x79, 0x61, 0xea, 0xd9, 0xf3, 0xc2, 0xd4, 0x27, 0xbd, 0x82, 0xf6, 0xa4, 0x57, 0xd0, - 0x9e, 0xf6, 0x0a, 0xda, 0xb3, 0x5e, 0x41, 0xfb, 0xad, 0x57, 0xd0, 0x3e, 0xff, 0xbd, 0x30, 0xf5, - 0x76, 0x7e, 0xd4, 0x0f, 0xc0, 0x7f, 0x04, 0x00, 0x00, 0xff, 0xff, 0x61, 0x0f, 0x0a, 0xd7, 0x34, - 0x16, 0x00, 0x00, + // 1671 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xd5, + 0x1a, 0xcf, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x6e, 0xab, 0xeb, 0x9b, 0xab, 0x6b, 0xe7, + 0x8e, 0x68, 0x1b, 0x68, 0x6b, 0xd3, 0xb4, 0x42, 0xb0, 0x01, 0x3a, 0x69, 0x9a, 0x86, 0xa6, 0x8e, + 0x75, 0x6c, 0x15, 0x81, 0x78, 0x74, 0x32, 0x3e, 0xb1, 0xa7, 0x1e, 0xcf, 0x19, 0x9d, 0x39, 0x0e, + 0xad, 0x84, 0x10, 0x1b, 0x16, 0xec, 0xf8, 0x17, 0x10, 0x7f, 0x01, 0x82, 0x05, 0x12, 0x82, 0xc2, + 0x06, 0x75, 0x59, 0x89, 0x4d, 0x37, 0x58, 0xd4, 
0xfc, 0x17, 0x59, 0xa1, 0xf3, 0x98, 0x97, 0x1f, + 0xb5, 0xa9, 0xaa, 0xac, 0x92, 0xf3, 0x7d, 0xdf, 0xf9, 0x7d, 0x8f, 0xf3, 0xbd, 0xc6, 0xe0, 0x5a, + 0xfb, 0x75, 0xbf, 0x64, 0x93, 0x72, 0xbb, 0x7b, 0x80, 0xa9, 0x8b, 0x19, 0xf6, 0xcb, 0x47, 0xd8, + 0x6d, 0x10, 0x5a, 0x56, 0x0c, 0xd3, 0xb3, 0xcb, 0x2e, 0x66, 0x9f, 0x10, 0xda, 0xb6, 0xdd, 0x66, + 0xf9, 0xe8, 0x72, 0xb9, 0x89, 0x5d, 0x4c, 0x4d, 0x86, 0x1b, 0x25, 0x8f, 0x12, 0x46, 0x60, 0x5e, + 0x4a, 0x96, 0x4c, 0xcf, 0x2e, 0x45, 0x92, 0xa5, 0xa3, 0xcb, 0x6b, 0x97, 0x9a, 0x36, 0x6b, 0x75, + 0x0f, 0x4a, 0x16, 0xe9, 0x94, 0x9b, 0xa4, 0x49, 0xca, 0xe2, 0xc2, 0x41, 0xf7, 0x50, 0x9c, 0xc4, + 0x41, 0xfc, 0x27, 0x81, 0xd6, 0xf4, 0x98, 0x4a, 0x8b, 0x50, 0x3c, 0x42, 0xd9, 0xda, 0xd5, 0x48, + 0xa6, 0x63, 0x5a, 0x2d, 0xdb, 0xc5, 0xf4, 0x41, 0xd9, 0x6b, 0x37, 0x39, 0xc1, 0x2f, 0x77, 0x30, + 0x33, 0x47, 0xdd, 0x2a, 0x8f, 0xbb, 0x45, 0xbb, 0x2e, 0xb3, 0x3b, 0x78, 0xe8, 0xc2, 0x6b, 0x93, + 0x2e, 0xf8, 0x56, 0x0b, 0x77, 0xcc, 0xa1, 0x7b, 0x57, 0xc6, 0xdd, 0xeb, 0x32, 0xdb, 0x29, 0xdb, + 0x2e, 0xf3, 0x19, 0x1d, 0xbc, 0xa4, 0xff, 0xac, 0x81, 0x53, 0x37, 0xeb, 0xf5, 0xea, 0xae, 0xdb, + 0xa4, 0xd8, 0xf7, 0xab, 0x26, 0x6b, 0xc1, 0x75, 0x30, 0xe7, 0x99, 0xac, 0x95, 0xd7, 0xd6, 0xb5, + 0x8d, 0x05, 0x63, 0xf1, 0x51, 0xaf, 0x38, 0xd3, 0xef, 0x15, 0xe7, 0x38, 0x0f, 0x09, 0x0e, 0xbc, + 0x0a, 0xb2, 0xfc, 0x6f, 0xfd, 0x81, 0x87, 0xf3, 0xb3, 0x42, 0x2a, 0xdf, 0xef, 0x15, 0xb3, 0x55, + 0x45, 0x3b, 0x8e, 0xfd, 0x8f, 0x42, 0x49, 0x58, 0x03, 0x99, 0x03, 0xd3, 0x6a, 0x63, 0xb7, 0x91, + 0x4f, 0xad, 0x6b, 0x1b, 0xb9, 0xcd, 0x8d, 0xd2, 0xb8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, + 0x9c, 0x52, 0x46, 0x64, 0x14, 0x01, 0x05, 0x48, 0xfa, 0x21, 0x38, 0x1d, 0xb3, 0x1f, 0x75, 0x1d, + 0x7c, 0xc7, 0x74, 0xba, 0x18, 0x56, 0x40, 0x9a, 0x2b, 0xf6, 0xf3, 0xda, 0xfa, 0xec, 0x46, 0x6e, + 0xf3, 0xe5, 0xf1, 0xaa, 0x06, 0xdc, 0x37, 0x96, 0x94, 0xae, 0x34, 0x3f, 0xf9, 0x48, 0xc2, 0xe8, + 0xfb, 0x20, 0xb3, 0x5b, 0x35, 0x1c, 0x62, 0xb5, 0x79, 0x7c, 0x2c, 0xbb, 0x41, 0x07, 0xe3, 0xb3, + 0xb5, 0x7b, 0x1d, 0x21, 0xc1, 0x81, 0x3a, 0x98, 0xc7, 0xf7, 0x2d, 0xec, 0xb1, 0x7c, 0x6a, 0x7d, + 0x76, 0x63, 0xc1, 0x00, 0xfd, 0x5e, 0x71, 0x7e, 0x5b, 0x50, 0x90, 0xe2, 0xe8, 0x5f, 0xa4, 0x40, + 0x46, 0xa9, 0x85, 0x77, 0x41, 0x96, 0xa7, 0x4f, 0xc3, 0x64, 0xa6, 0x40, 0xcd, 0x6d, 0xbe, 0x1a, + 0xb3, 0x37, 0x7c, 0xcd, 0x92, 0xd7, 0x6e, 0x72, 0x82, 0x5f, 0xe2, 0xd2, 0xdc, 0xf6, 0xfd, 0x83, + 0x7b, 0xd8, 0x62, 0xb7, 0x31, 0x33, 0x0d, 0xa8, 0xec, 0x00, 0x11, 0x0d, 0x85, 0xa8, 0x70, 0x07, + 0xcc, 0xf9, 0x1e, 0xb6, 0x54, 0xe0, 0xcf, 0x4e, 0x0c, 0x7c, 0xcd, 0xc3, 0x56, 0xe4, 0x1a, 0x3f, + 0x21, 0x01, 0x00, 0xf7, 0xc1, 0xbc, 0xcf, 0x4c, 0xd6, 0xf5, 0xc5, 0xc3, 0xe7, 0x36, 0xcf, 0x4f, + 0x86, 0x12, 0xe2, 0xc6, 0xb2, 0x02, 0x9b, 0x97, 0x67, 0xa4, 0x60, 0xf4, 0x5f, 0x35, 0xb0, 0x9c, + 0x7c, 0x6d, 0x78, 0x07, 0x64, 0x7c, 0x4c, 0x8f, 0x6c, 0x0b, 0xe7, 0xe7, 0x84, 0x92, 0xf2, 0x64, + 0x25, 0x52, 0x3e, 0xc8, 0x97, 0x1c, 0xcf, 0x15, 0x45, 0x43, 0x01, 0x18, 0x7c, 0x17, 0x64, 0x29, + 0xf6, 0x49, 0x97, 0x5a, 0x58, 0x59, 0x7f, 0x29, 0x0e, 0xcc, 0xeb, 0x9e, 0x43, 0xf2, 0x64, 0x6d, + 0xec, 0x11, 0xcb, 0x74, 0x64, 0x28, 0x11, 0x3e, 0xc4, 0x14, 0xbb, 0x16, 0x36, 0x16, 0x79, 0x96, + 0x23, 0x05, 0x81, 0x42, 0x30, 0x5e, 0x45, 0x8b, 0xca, 0x90, 0x2d, 0xc7, 0x3c, 0x91, 0x07, 0xdd, + 0x4b, 0x3c, 0xe8, 0x2b, 0x13, 0x03, 0x24, 0xec, 0x1a, 0xf7, 0xaa, 0xfa, 0x4f, 0x1a, 0x58, 0x89, + 0x0b, 0xee, 0xd9, 0x3e, 0x83, 0x1f, 0x0c, 0x39, 0x51, 0x9a, 0xce, 0x09, 0x7e, 0x5b, 0xb8, 0xb0, + 0xa2, 0x54, 0x65, 0x03, 0x4a, 0xcc, 0x81, 0x5b, 0x20, 0x6d, 0x33, 0xdc, 
0xf1, 0x45, 0x89, 0xe4, + 0x36, 0xcf, 0x4d, 0xe7, 0x41, 0x54, 0x9d, 0xbb, 0xfc, 0x32, 0x92, 0x18, 0xfa, 0x1f, 0x1a, 0x28, + 0xc6, 0xc5, 0xaa, 0x26, 0x35, 0x3b, 0x98, 0x61, 0xea, 0x87, 0x8f, 0x07, 0x37, 0x40, 0xd6, 0xac, + 0xee, 0xee, 0x50, 0xd2, 0xf5, 0x82, 0xd2, 0xe5, 0xa6, 0x5d, 0x53, 0x34, 0x14, 0x72, 0x79, 0x81, + 0xb7, 0x6d, 0xd5, 0xa5, 0x62, 0x05, 0x7e, 0xcb, 0x76, 0x1b, 0x48, 0x70, 0xb8, 0x84, 0x6b, 0x76, + 0x82, 0xe6, 0x17, 0x4a, 0x54, 0xcc, 0x0e, 0x46, 0x82, 0x03, 0x8b, 0x20, 0xed, 0x5b, 0xc4, 0x93, + 0x19, 0xbc, 0x60, 0x2c, 0x70, 0x93, 0x6b, 0x9c, 0x80, 0x24, 0x1d, 0x5e, 0x00, 0x0b, 0x5c, 0xd0, + 0xf7, 0x4c, 0x0b, 0xe7, 0xd3, 0x42, 0x68, 0xa9, 0xdf, 0x2b, 0x2e, 0x54, 0x02, 0x22, 0x8a, 0xf8, + 0xfa, 0xb7, 0x03, 0xef, 0xc3, 0x9f, 0x0e, 0x6e, 0x02, 0x60, 0x11, 0x97, 0x51, 0xe2, 0x38, 0x38, + 0xe8, 0x46, 0x61, 0xd2, 0x6c, 0x85, 0x1c, 0x14, 0x93, 0x82, 0x36, 0x00, 0x5e, 0x18, 0x1b, 0x95, + 0x3c, 0x6f, 0x4c, 0x17, 0xfa, 0x11, 0x31, 0x35, 0x96, 0xb9, 0xaa, 0x18, 0x23, 0x06, 0xae, 0x7f, + 0xa7, 0x81, 0x9c, 0xba, 0x7f, 0x02, 0xe9, 0x74, 0x23, 0x99, 0x4e, 0xff, 0x9f, 0x3c, 0x5a, 0x46, + 0x67, 0xd2, 0x0f, 0x1a, 0x58, 0x0b, 0xac, 0x26, 0x66, 0xc3, 0x30, 0x1d, 0xd3, 0xb5, 0x30, 0x0d, + 0x3a, 0xf5, 0x1a, 0x48, 0xd9, 0x41, 0xfa, 0x00, 0x05, 0x90, 0xda, 0xad, 0xa2, 0x94, 0xed, 0xc1, + 0x8b, 0x20, 0xdb, 0x22, 0x3e, 0x13, 0x89, 0x21, 0x53, 0x27, 0x34, 0xf8, 0xa6, 0xa2, 0xa3, 0x50, + 0x02, 0x56, 0x41, 0xda, 0x23, 0x94, 0xf9, 0xf9, 0x39, 0x61, 0xf0, 0x85, 0x89, 0x06, 0x57, 0x09, + 0x65, 0xaa, 0x97, 0x46, 0x23, 0x8a, 0x23, 0x20, 0x09, 0xa4, 0x7f, 0x0a, 0xfe, 0x33, 0xc2, 0x72, + 0x79, 0x05, 0x7e, 0x0c, 0x32, 0xb6, 0x64, 0xaa, 0x89, 0x78, 0x75, 0xa2, 0xc2, 0x11, 0xfe, 0x47, + 0x83, 0x38, 0x18, 0xb8, 0x01, 0xaa, 0xfe, 0x8d, 0x06, 0x56, 0x87, 0x2c, 0x15, 0xbb, 0x04, 0xa1, + 0x4c, 0x44, 0x2c, 0x1d, 0xdb, 0x25, 0x08, 0x65, 0x48, 0x70, 0xe0, 0x2d, 0x90, 0x15, 0xab, 0x88, + 0x45, 0x1c, 0x15, 0xb5, 0x72, 0x10, 0xb5, 0xaa, 0xa2, 0x1f, 0xf7, 0x8a, 0xff, 0x1d, 0xde, 0xcf, + 0x4a, 0x01, 0x1b, 0x85, 0x00, 0xbc, 0xea, 0x30, 0xa5, 0x84, 0xaa, 0xc2, 0x14, 0x55, 0xb7, 0xcd, + 0x09, 0x48, 0xd2, 0xf5, 0xaf, 0xa3, 0xa4, 0xe4, 0xbb, 0x02, 0xb7, 0x8f, 0xbf, 0xc8, 0xe0, 0x2c, + 0xe7, 0xef, 0x85, 0x04, 0x07, 0x7a, 0x60, 0xc5, 0x1e, 0x58, 0x2e, 0xa6, 0x6e, 0xba, 0xe1, 0x0d, + 0x23, 0xaf, 0x90, 0x57, 0x06, 0x39, 0x68, 0x08, 0x5d, 0xbf, 0x0b, 0x86, 0xa4, 0x78, 0xbb, 0x6f, + 0x31, 0xe6, 0x8d, 0x28, 0x9c, 0xf1, 0xdb, 0x4c, 0xa4, 0x3d, 0x2b, 0x7c, 0xaa, 0xd7, 0xab, 0x48, + 0xa0, 0xe8, 0x5f, 0x6a, 0xe0, 0xcc, 0xc8, 0xc1, 0x19, 0x36, 0x36, 0x6d, 0x6c, 0x63, 0xab, 0xa8, + 0x17, 0x95, 0x31, 0xb8, 0x38, 0xde, 0x92, 0x24, 0x32, 0x7f, 0xf1, 0x51, 0xef, 0xaf, 0xff, 0x96, + 0x0a, 0x5f, 0x44, 0x74, 0xb5, 0xb7, 0xc3, 0x78, 0x8b, 0xae, 0xc3, 0x35, 0xab, 0x1e, 0x7a, 0x3a, + 0x16, 0xbf, 0x90, 0x87, 0x86, 0xa4, 0x61, 0x03, 0x2c, 0x37, 0xf0, 0xa1, 0xd9, 0x75, 0x98, 0xd2, + 0xad, 0xa2, 0x36, 0xfd, 0xba, 0x09, 0xfb, 0xbd, 0xe2, 0xf2, 0xf5, 0x04, 0x06, 0x1a, 0xc0, 0x84, + 0x5b, 0x60, 0x96, 0x39, 0x41, 0xbb, 0x79, 0x69, 0x22, 0x74, 0x7d, 0xaf, 0x66, 0xe4, 0x94, 0xfb, + 0xb3, 0xf5, 0xbd, 0x1a, 0xe2, 0xb7, 0xe1, 0x3b, 0x20, 0x4d, 0xbb, 0x0e, 0xe6, 0xcb, 0xd4, 0xec, + 0x54, 0x7b, 0x19, 0x7f, 0xd3, 0xa8, 0xfc, 0xf9, 0xc9, 0x47, 0x12, 0x42, 0xff, 0x0c, 0x2c, 0x25, + 0x36, 0x2e, 0xd8, 0x01, 0x8b, 0x4e, 0xac, 0x84, 0x55, 0x14, 0xae, 0xfc, 0xa3, 0xba, 0x57, 0x0d, + 0xe7, 0xb4, 0xd2, 0xb8, 0x18, 0xe7, 0xa1, 0x04, 0xbc, 0x6e, 0x02, 0x10, 0xf9, 0xca, 0x2b, 0x91, + 0x97, 0x8f, 0xec, 0x36, 0xaa, 0x12, 0x79, 0x55, 0xf9, 0x48, 0xd2, 0xf9, 0xf4, 0xf2, 0xb1, 0x45, + 
0x31, 0xab, 0x44, 0xfd, 0x32, 0x9c, 0x5e, 0xb5, 0x90, 0x83, 0x62, 0x52, 0xfa, 0x2f, 0x1a, 0x58, + 0xaa, 0x48, 0x93, 0xab, 0xc4, 0xb1, 0xad, 0x07, 0x27, 0xb0, 0x68, 0xdd, 0x4e, 0x2c, 0x5a, 0xcf, + 0x68, 0xd3, 0x09, 0xc3, 0xc6, 0x6e, 0x5a, 0xdf, 0x6b, 0xe0, 0xdf, 0x09, 0xc9, 0xed, 0xa8, 0x19, + 0x85, 0x23, 0x41, 0x9b, 0x34, 0x12, 0x12, 0x08, 0xa2, 0xb4, 0x46, 0x8e, 0x04, 0xb8, 0x03, 0x52, + 0x8c, 0xa8, 0x1c, 0x9d, 0x1a, 0x0e, 0x63, 0x1a, 0xcd, 0xb6, 0x3a, 0x41, 0x29, 0x46, 0xf4, 0x1f, + 0x35, 0x90, 0x4f, 0x48, 0xc5, 0x9b, 0xe8, 0x8b, 0xb7, 0xfb, 0x36, 0x98, 0x3b, 0xa4, 0xa4, 0xf3, + 0x3c, 0x96, 0x87, 0x41, 0xbf, 0x41, 0x49, 0x07, 0x09, 0x18, 0xfd, 0xa1, 0x06, 0x56, 0x13, 0x92, + 0x27, 0xb0, 0x90, 0xec, 0x25, 0x17, 0x92, 0xf3, 0x53, 0xfa, 0x30, 0x66, 0x2d, 0x79, 0x98, 0x1a, + 0xf0, 0x80, 0xfb, 0x0a, 0x0f, 0x41, 0xce, 0x23, 0x8d, 0x1a, 0x76, 0xb0, 0xc5, 0xc8, 0xa8, 0x02, + 0x7f, 0x96, 0x13, 0xe6, 0x01, 0x76, 0x82, 0xab, 0xc6, 0xa9, 0x7e, 0xaf, 0x98, 0xab, 0x46, 0x58, + 0x28, 0x0e, 0x0c, 0xef, 0x83, 0xd5, 0x70, 0x17, 0x0d, 0xb5, 0xa5, 0x9e, 0x5f, 0xdb, 0x99, 0x7e, + 0xaf, 0xb8, 0x5a, 0x19, 0x44, 0x44, 0xc3, 0x4a, 0xe0, 0x4d, 0x90, 0xb1, 0x3d, 0xf1, 0xd9, 0xad, + 0xbe, 0xd8, 0x9e, 0xb5, 0xd8, 0xc9, 0xef, 0x73, 0xf9, 0xf1, 0xa7, 0x0e, 0x28, 0xb8, 0xae, 0xff, + 0x3e, 0x98, 0x03, 0x3c, 0xe1, 0xe0, 0x4e, 0x6c, 0xfb, 0x90, 0x33, 0xef, 0xc2, 0xf3, 0x6d, 0x1e, + 0xc9, 0xb1, 0x38, 0xbe, 0x09, 0x75, 0x99, 0xed, 0x94, 0xe4, 0x8f, 0x31, 0xa5, 0x5d, 0x97, 0xed, + 0xd3, 0x1a, 0xa3, 0xb6, 0xdb, 0x94, 0x23, 0x3a, 0xb6, 0x16, 0x9d, 0x05, 0x19, 0x35, 0x35, 0x85, + 0xe3, 0x69, 0xe9, 0xd5, 0xb6, 0x24, 0xa1, 0x80, 0xa7, 0x1f, 0x0f, 0xe6, 0x85, 0x98, 0xa1, 0xf7, + 0x5e, 0x58, 0x5e, 0xfc, 0x4b, 0x65, 0xe3, 0xf8, 0xdc, 0xf8, 0x30, 0x5a, 0x2c, 0x65, 0xa6, 0x6f, + 0x4e, 0x99, 0xe9, 0xf1, 0x89, 0x36, 0x76, 0xad, 0x84, 0xef, 0x81, 0x79, 0x2c, 0xd1, 0xe5, 0x88, + 0xbc, 0x3c, 0x25, 0x7a, 0xd4, 0x56, 0xa3, 0x5f, 0x1e, 0x14, 0x4d, 0x01, 0xc2, 0xb7, 0x78, 0x94, + 0xb8, 0x2c, 0xff, 0xe0, 0x97, 0x7b, 0xf8, 0x82, 0xf1, 0x3f, 0xe9, 0x6c, 0x48, 0x3e, 0xe6, 0x1f, + 0x38, 0xe1, 0x11, 0xc5, 0x6f, 0xe8, 0x1f, 0x01, 0x38, 0xbc, 0xe4, 0x4c, 0xb1, 0x42, 0x9d, 0x03, + 0xf3, 0x6e, 0xb7, 0x73, 0x80, 0x65, 0x0d, 0xa5, 0x23, 0x03, 0x2b, 0x82, 0x8a, 0x14, 0xd7, 0x78, + 0xf3, 0xd1, 0xd3, 0xc2, 0xcc, 0xe3, 0xa7, 0x85, 0x99, 0x27, 0x4f, 0x0b, 0x33, 0x9f, 0xf7, 0x0b, + 0xda, 0xa3, 0x7e, 0x41, 0x7b, 0xdc, 0x2f, 0x68, 0x4f, 0xfa, 0x05, 0xed, 0xcf, 0x7e, 0x41, 0xfb, + 0xea, 0xaf, 0xc2, 0xcc, 0xfb, 0xf9, 0x71, 0xbf, 0x96, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd4, + 0x46, 0x40, 0xf2, 0x61, 0x15, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -1822,16 +1790,6 @@ func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2180,43 +2138,6 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2583,8 +2504,6 @@ func (m *NetworkPolicy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2717,21 +2636,6 @@ func (m *NetworkPolicySpec) Size() (n int) { return n } -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *ServiceBackendPort) Size() (n int) { if m == nil { return 0 @@ -3006,7 +2910,6 @@ func (this *NetworkPolicy) String() string { s := strings.Join([]string{`&NetworkPolicy{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3116,21 +3019,6 @@ func (this *NetworkPolicySpec) String() string { }, "") return s } -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} func (this *ServiceBackendPort) String() string { if this == nil { return "nil" @@ -5609,39 +5497,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6496,90 +6351,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto index ed194a89d564..b50dd491e0fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto @@ -384,11 +384,6 @@ message NetworkPolicy { // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - - // status represents the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional NetworkPolicyStatus status = 3; } // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods @@ -536,18 +531,6 @@ message NetworkPolicySpec { repeated string policyTypes = 4; } -// NetworkPolicyStatus describes the current state of the NetworkPolicy. -message NetworkPolicyStatus { - // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; -} - // ServiceBackendPort is the service port being referenced. message ServiceBackendPort { // name is the name of the port on the Service. 
diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go index fa7cf1bd700f..a17e2cb5b393 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types.go @@ -38,10 +38,10 @@ type NetworkPolicy struct { // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // status represents the current state of the NetworkPolicy. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! + // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PolicyType string describes the NetworkPolicy type @@ -205,48 +205,6 @@ type NetworkPolicyPeer struct { IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } -// NetworkPolicyConditionType is the type for status conditions on -// a NetworkPolicy. This type should be used with the -// NetworkPolicyStatus.Conditions field. -type NetworkPolicyConditionType string - -const ( - // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by - // the Network Policy provider and will be implemented in the cluster - NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted" - - // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially - // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some - // other condition - NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure" - - // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the - // Network Policy provider and will not be implemented in the cluster - NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure" -) - -// NetworkPolicyConditionReason defines the set of reasons that explain why a -// particular NetworkPolicy condition type has been raised. -type NetworkPolicyConditionReason string - -const ( - // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been - // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider - NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" -) - -// NetworkPolicyStatus describes the current state of the NetworkPolicy. -type NetworkPolicyStatus struct { - // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NetworkPolicyList is a list of NetworkPolicy objects. 
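The status removal above also deletes the NetworkPolicyStatus type itself, so consumers of these vendored types lose the field at compile time, while protobuf tag 3 stays reserved per the tombstone comment. A minimal sketch (hypothetical consumer code, not part of this change) of what remains usable on NetworkPolicy after the update:

```go
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	np := networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all"},
		Spec: networkingv1.NetworkPolicySpec{
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
		},
	}
	// Pre-1.27 code could also read np.Status.Conditions; that field (and
	// the NetworkPolicyStatus type) is gone, so such reads must be removed
	// when picking up these types. Only metadata and spec remain:
	fmt.Println(np.Name, np.Spec.PolicyTypes)
}
```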
diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 91161d5ca4e6..ff080540d39b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -224,7 +224,6 @@ var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", - "status": "status represents the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -295,15 +294,6 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { return map_NetworkPolicySpec } -var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describes the current state of the NetworkPolicy.", - "conditions": "conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", -} - -func (NetworkPolicyStatus) SwaggerDoc() map[string]string { - return map_NetworkPolicyStatus -} - var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index c95653c918ce..540873833f3c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -499,7 +499,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) return } @@ -712,29 +711,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus. -func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { - if in == nil { - return nil - } - out := new(NetworkPolicyStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.proto index 222f2b9052b1..13ff60ea718c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.proto @@ -66,6 +66,7 @@ message ClusterRoleBinding { // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. optional RoleRef roleRef = 3; } @@ -140,6 +141,7 @@ message RoleBinding { // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. optional RoleRef roleRef = 3; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types.go index 5a8e4a85c88d..ce845d69b426 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types.go @@ -132,6 +132,7 @@ type RoleBinding struct { // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } @@ -209,6 +210,7 @@ type ClusterRoleBinding struct { // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 370398198bca..0471a5594466 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ var map_ClusterRoleBinding = map[string]string{ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", } func (ClusterRoleBinding) SwaggerDoc() map[string]string { @@ -105,7 +105,7 @@ var map_RoleBinding = map[string]string{ "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", } func (RoleBinding) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 155648acb6df..1a9f5e7706b5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 1bf6b06d47f8..1fdd32c4ba3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -112,8 +112,27 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { // EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates // the loop. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead. func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, false) +} + +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error { if unstructured, ok := obj.(runtime.Unstructured); ok { + if allocNew { + return unstructured.EachListItemWithAlloc(fn) + } return unstructured.EachListItem(fn) } // TODO: Change to an interface call? @@ -140,8 +159,19 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { for i := 0; i < len; i++ { raw := items.Index(i) if takeAddr { - raw = raw.Addr() + if allocNew { + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + // reflect.New will guarantee that itemCopy must be a pointer. + raw = itemCopy + } else { + raw = raw.Addr() + } } + // raw must be a pointer or an interface + // allocate a pointer is cheap switch item := raw.Interface().(type) { case *runtime.RawExtension: if err := fn(item.Object); err != nil { @@ -166,7 +196,23 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { // ExtractList returns obj's Items element as an array of runtime.Objects. // Returns an error if obj is not a List type (does not have an Items member). 
+// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead. func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, false) +} + +// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency. +func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) { itemsPtr, err := GetItemsPtr(obj) if err != nil { return nil, err @@ -176,10 +222,17 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return nil, err } list := make([]runtime.Object, items.Len()) + if len(list) == 0 { + return list, nil + } + elemType := items.Type().Elem() + isRawExtension := elemType == rawExtensionObjectType + implementsObject := elemType.Implements(objectType) for i := range list { raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: + switch { + case isRawExtension: + item := raw.Interface().(runtime.RawExtension) switch { case item.Object != nil: list[i] = item.Object @@ -189,8 +242,18 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { default: list[i] = nil } - case runtime.Object: - list[i] = item + case implementsObject: + list[i] = raw.Interface().(runtime.Object) + case allocNew: + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + var ok bool + // reflect.New will guarantee that itemCopy must be a pointer. + if list[i], ok = itemCopy.Interface().(runtime.Object); !ok { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } default: var found bool if list[i], found = raw.Addr().Interface().(runtime.Object); !found { @@ -201,8 +264,12 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return list, nil } -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) +var ( + // objectSliceType is the type of a slice of Objects + objectSliceType = reflect.TypeOf([]runtime.Object{}) + objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem() + rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{}) +) // LenList returns the length of this list or 0 if it is not a list. 
func LenList(list runtime.Object) int { @@ -237,7 +304,7 @@ func SetList(list runtime.Object, objects []runtime.Object) error { slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) for i := range objects { dest := slice.Index(i) - if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + if dest.Type() == rawExtensionObjectType { dest = dest.FieldByName("Object") } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index d1c9f53074d5..063fd285dad1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - derekwaynecarr diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 48955dca85bb..a2cd8015fb5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -425,8 +425,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 352d58ebc24c..dea82839ce5a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -1207,9 +1207,7 @@ type LabelSelector struct { // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index a499eee8ebb8..40d289f3750c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -101,6 +101,11 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } +func (obj *Unstructured) EachListItemWithAlloc(fn func(runtime.Object) error) error { + // EachListItem has allocated a new Object for the user, we can use it directly. 
+ return obj.EachListItem(fn) +} + func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { return make(map[string]interface{}) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index 5028f5fb57a3..82beda2a29c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,15 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +func (u *UnstructuredList) EachListItemWithAlloc(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&Unstructured{Object: u.Items[i].Object}); err != nil { + return err + } + } + return nil +} + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 7fc513dd0e7f..73f85286c235 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -45,7 +45,6 @@ func NewCodec(e Encoder, d Decoder) Codec { // Encode is a convenience wrapper for encoding to a []byte from an Encoder func Encode(e Encoder, obj Object) ([]byte, error) { - // TODO: reuse buffer buf := &bytes.Buffer{} if err := e.Encode(obj, buf); err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 710a977952f2..e89ea8939178 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -365,4 +365,9 @@ type Unstructured interface { // error should terminate the iteration. If IsList() returns false, this method should return an error // instead of calling the provided function. EachListItem(func(Object) error) error + // EachListItemWithAlloc works like EachListItem, but avoids retaining references to a slice of items. + // It does this by making a shallow copy of non-pointer items before passing them to fn. + // + // If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. + EachListItemWithAlloc(func(Object) error) error } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 54ccb7a74c77..d1c37c9429c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -39,7 +39,7 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { // ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` // and parses it out into both possibilities. 
This code takes no responsibility for knowing which representation was intended // but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then -// `*GroupVersionResource` is nil. +// `*GroupVersionKind` is nil. // `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { var gvk *GroupVersionKind diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/splice.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/splice.go new file mode 100644 index 000000000000..2badb7b97f3a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/splice.go @@ -0,0 +1,76 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "io" +) + +// Splice is the interface that wraps the Splice method. +// +// Splice moves data from given slice without copying the underlying data for +// efficiency purpose. Therefore, the caller should make sure the underlying +// data is not changed later. +type Splice interface { + Splice([]byte) + io.Writer + Reset() + Bytes() []byte +} + +// A spliceBuffer implements Splice and io.Writer interfaces. +type spliceBuffer struct { + raw []byte + buf *bytes.Buffer +} + +func NewSpliceBuffer() Splice { + return &spliceBuffer{} +} + +// Splice implements the Splice interface. +func (sb *spliceBuffer) Splice(raw []byte) { + sb.raw = raw +} + +// Write implements the io.Writer interface. +func (sb *spliceBuffer) Write(p []byte) (n int, err error) { + if sb.buf == nil { + sb.buf = &bytes.Buffer{} + } + return sb.buf.Write(p) +} + +// Reset resets the buffer to be empty. +func (sb *spliceBuffer) Reset() { + if sb.buf != nil { + sb.buf.Reset() + } + sb.raw = nil +} + +// Bytes returns the data held by the buffer. 
+func (sb *spliceBuffer) Bytes() []byte { + if sb.buf != nil && len(sb.buf.Bytes()) > 0 { + return sb.buf.Bytes() + } + if sb.raw != nil { + return sb.raw + } + return []byte{} +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 29fb4f950a40..db18ce1ce21b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -41,7 +41,8 @@ func (n NamespacedName) String() string { // MarshalLog emits a struct containing required key/value pair func (n NamespacedName) MarshalLog() interface{} { return struct { - Name, Namespace string + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` }{ Name: n.Name, Namespace: n.Namespace, diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 0d2f153bf9eb..1396274c7bf9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -40,6 +40,13 @@ func NewExpiringWithClock(clock clock.Clock) *Expiring { // Expiring is a map whose entries expire after a per-entry timeout. type Expiring struct { + // AllowExpiredGet causes the expiration check to be skipped on Get. + // It should only be used when a key always corresponds to the exact same value. + // Thus when this field is true, expired keys are considered valid + // until the next call to Set (which causes the GC to run). + // It may not be changed concurrently with calls to Get. + AllowExpiredGet bool + clock clock.Clock // mu protects the below fields @@ -70,7 +77,10 @@ func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { c.mu.RLock() defer c.mu.RUnlock() e, ok := c.cache[key] - if !ok || !c.clock.Now().Before(e.expiry) { + if !ok { + return nil, false + } + if !c.AllowExpiredGet && !c.clock.Now().Before(e.expiry) { return nil, false } return e.val, true diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index ec4002e38a26..fc0301844906 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -23,34 +23,20 @@ import ( "strings" "text/tabwriter" - "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/dump" ) -// StringDiff diffs a and b and returns a human readable diff. -func StringDiff(a, b string) string { - ba := []byte(a) - bb := []byte(b) - out := []byte{} - i := 0 - for ; i < len(ba) && i < len(bb); i++ { - if ba[i] != bb[i] { - break - } - out = append(out, ba[i]) - } - out = append(out, []byte("\n\nA: ")...) - out = append(out, ba[i:]...) - out = append(out, []byte("\n\nB: ")...) - out = append(out, bb[i:]...) - out = append(out, []byte("\n\n")...) - return string(out) -} - func legacyDiff(a, b interface{}) string { return cmp.Diff(a, b) } +// StringDiff diffs a and b and returns a human readable diff. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func StringDiff(a, b string) string { + return legacyDiff(a, b) +} + // ObjectDiff prints the diff of two go objects and fails if the objects // contain unhandled unexported fields. 
// DEPRECATED: use github.com/google/go-cmp/cmp.Diff @@ -75,13 +61,8 @@ func ObjectReflectDiff(a, b interface{}) string { // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, // enabling easy visual scanning for mismatches. func ObjectGoPrintSideBySide(a, b interface{}) string { - s := spew.ConfigState{ - Indent: " ", - // Extra deep spew. - DisableMethods: true, - } - sA := s.Sdump(a) - sB := s.Sdump(b) + sA := dump.Pretty(a) + sB := dump.Pretty(b) linesA := strings.Split(sA, "\n") linesB := strings.Split(sB, "\n") diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go new file mode 100644 index 000000000000..cf61ef76aed3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dump + +import ( + "github.com/davecgh/go-spew/spew" +) + +var prettyPrintConfig = &spew.ConfigState{ + Indent: " ", + DisableMethods: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// The config MUST NOT be changed because that could change the result of a hash operation +var prettyPrintConfigForHash = &spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// Pretty wrap the spew.Sdump with Indent, and disabled methods like error() and String() +// The output may change over time, so for guaranteed output please take more direct control +func Pretty(a interface{}) string { + return prettyPrintConfig.Sdump(a) +} + +// ForHash keeps the original Spew.Sprintf format to ensure the same checksum +func ForHash(a interface{}) string { + return prettyPrintConfigForHash.Sprintf("%#v", a) +} + +// OneLine outputs the object in one line +func OneLine(a interface{}) string { + return prettyPrintConfig.Sprintf("%#v", a) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index 27c3d2d56451..7fe52ee568ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -23,7 +23,7 @@ import ( "encoding/base64" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httputil" @@ -337,7 +337,7 @@ func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connec if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { defer resp.Body.Close() responseError := "" - responseErrorBytes, err := ioutil.ReadAll(resp.Body) + responseErrorBytes, err := io.ReadAll(resp.Body) if err != nil { responseError = "unable to read 
error from server response" } else { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5e8009704535..0ea88156bef1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -54,7 +54,7 @@ const ( // FromInt creates an IntOrString object with an int32 value. It is // your responsibility not to call this method with a value greater // than int32. -// TODO: convert to (val int32) +// Deprecated: use FromInt32 instead. func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) @@ -62,6 +62,11 @@ func FromInt(val int) IntOrString { return IntOrString{Type: Int, IntVal: int32(val)} } +// FromInt32 creates an IntOrString object with an int32 value. +func FromInt32(val int32) IntOrString { + return IntOrString{Type: Int, IntVal: val} +} + // FromString creates an IntOrString object with a string value. func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go index f3111d4bc723..eca04a711638 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go @@ -56,17 +56,20 @@ func NewFieldManager(f Manager, subresource string) *FieldManager { // newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. 
func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { return NewFieldManager( - NewLastAppliedUpdater( - NewLastAppliedManager( - NewProbabilisticSkipNonAppliedManager( - NewCapManagersManager( - NewBuildManagerInfoManager( - NewManagedFieldsUpdater( - NewStripMetaManager(f), - ), kind.GroupVersion(), subresource, - ), DefaultMaxUpdateManagers, - ), objectCreater, kind, DefaultTrackOnCreateProbability, - ), typeConverter, objectConverter, kind.GroupVersion()), + NewVersionCheckManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion(), + ), + ), kind, ), subresource, ) } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go index 6b281ec1e575..f24c040edd03 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go @@ -22,13 +22,11 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" ) type skipNonAppliedManager struct { fieldManager Manager objectCreater runtime.ObjectCreater - gvk schema.GroupVersionKind beforeApplyManagerName string probability float32 } @@ -36,17 +34,16 @@ type skipNonAppliedManager struct { var _ Manager = &skipNonAppliedManager{} // NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. -func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager { - return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0) +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0) } // NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, // or starts tracking on create with p probability. -func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager { +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, p float32) Manager { return &skipNonAppliedManager{ fieldManager: fieldManager, objectCreater: objectCreater, - gvk: gvk, beforeApplyManagerName: "before-first-apply", probability: p, } @@ -78,9 +75,10 @@ func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed M // Apply implements Manager. 
func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { if len(managed.Fields()) == 0 { - emptyObj, err := f.objectCreater.New(f.gvk) + gvk := appliedObj.GetObjectKind().GroupVersionKind() + emptyObj, err := f.objectCreater.New(gvk) if err != nil { - return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err) + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err) } liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index eb5598ac3bfa..2112c9ab7e90 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -41,6 +41,9 @@ var _ Manager = &structuredMergeManager{} // NewStructuredMergeManager creates a new Manager that merges apply requests // and update managed fields for other types of requests. func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { + if typeConverter == nil { + return nil, fmt.Errorf("typeconverter must not be nil") + } return &structuredMergeManager{ typeConverter: typeConverter, objectConverter: objectConverter, diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go new file mode 100644 index 000000000000..ee1e2bca7017 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type versionCheckManager struct { + fieldManager Manager + gvk schema.GroupVersionKind +} + +var _ Manager = &versionCheckManager{} + +// NewVersionCheckManager creates a manager that makes sure that the +// applied object is in the proper version. +func NewVersionCheckManager(fieldManager Manager, gvk schema.GroupVersionKind) Manager { + return &versionCheckManager{fieldManager: fieldManager, gvk: gvk} +} + +// Update implements Manager. +func (f *versionCheckManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + // Nothing to do for updates, this is checked in many other places. 
+ return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *versionCheckManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if gvk := appliedObj.GetObjectKind().GroupVersionKind(); gvk != f.gvk { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid object type: %v", gvk)) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index a20efd187159..25626cf3af2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/dump" "sigs.k8s.io/yaml" ) @@ -76,7 +76,7 @@ func ToYAMLOrError(v interface{}) string { func toYAML(v interface{}) (string, error) { y, err := yaml.Marshal(v) if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, dump.Pretty(v)) } return string(y), nil diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 1c2aba55f7bf..1635e69a5c00 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -20,6 +20,7 @@ import ( "errors" "net" "reflect" + "strings" "syscall" ) @@ -47,6 +48,11 @@ func IsConnectionReset(err error) bool { return false } +// Returns if the given err is "http2: client connection lost" error. +func IsHTTP2ConnectionLost(err error) bool { + return err != nil && strings.Contains(err.Error(), "http2: client connection lost") +} + // Returns if the given err is "connection refused" error func IsConnectionRefused(err error) bool { var errno syscall.Errno diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go index 489d9b042641..5a2dd6e14c87 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -22,7 +22,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "net/http" "net/url" "path" @@ -263,7 +262,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht return resp, err } - resp.Body = ioutil.NopCloser(newContent) + resp.Body = io.NopCloser(newContent) // Update header node with new content-length // TODO: Remove any hash/signature headers here? 
resp.Header.Del("Content-Length") diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index a5bb58575807..ac2ada5472c3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -21,7 +21,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "log" "net" "net/http" @@ -148,7 +147,7 @@ func (onewayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(&bytes.Buffer{}), + Body: io.NopCloser(&bytes.Buffer{}), Request: req, }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 3ee683b99701..920c113bbd73 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1182,7 +1182,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, schema Looku merged = originalFieldValue case !foundOriginal && foundPatch: // list was added - merged = patchFieldValue + v, keep := removeDirectives(patchFieldValue) + if !keep { + // Shouldn't be possible since patchFieldValue is a slice + continue + } + + merged = v.([]interface{}) case foundOriginal && foundPatch: merged, err = mergeSliceHandler(originalList, patchList, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) @@ -1270,6 +1276,42 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey return patch, serverOnly, nil } +// Removes directives from an object and returns value to use instead and whether +// or not the field/index should even be kept +// May modify input +func removeDirectives(obj interface{}) (interface{}, bool) { + if obj == nil { + return obj, true + } else if typedV, ok := obj.(map[string]interface{}); ok { + if _, hasDirective := typedV[directiveMarker]; hasDirective { + return nil, false + } + + for k, v := range typedV { + var keep bool + typedV[k], keep = removeDirectives(v) + if !keep { + delete(typedV, k) + } + } + return typedV, true + } else if typedV, ok := obj.([]interface{}); ok { + var res []interface{} + if typedV != nil { + // Make sure res is non-nil if patch is non-nil + res = []interface{}{} + } + for _, v := range typedV { + if newV, keep := removeDirectives(v); keep { + res = append(res, newV) + } + } + return res, true + } else { + return obj, true + } +} + // Merge fields from a patch map into the original map. Note: This may modify // both the original map and the patch because getting a deep copy of a map in // golang is highly non-trivial. 
@@ -1333,7 +1375,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if mergeOptions.IgnoreUnmatchedNulls { discardNullValuesFromPatch(patchV) } - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1345,7 +1390,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if mergeOptions.IgnoreUnmatchedNulls { discardNullValuesFromPatch(patchV) } - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1372,7 +1420,11 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me } original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + // if patchV itself is a directive, then don't keep it + delete(original, k) + } } if err != nil { return nil, err @@ -1425,7 +1477,8 @@ func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, return nil, err } - if fieldPatchStrategy == mergeDirective { + // Delete lists are handled the same way regardless of what the field's patch strategy is + if fieldPatchStrategy == mergeDirective || isDeleteList { return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 8c997ec4502b..4c6195695336 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -121,6 +121,11 @@ func MustParseSemantic(str string) *Version { return v } +// MajorMinor returns a version with the provided major and minor version. +func MajorMinor(major, minor uint) *Version { + return &Version{components: []uint{major, minor}} +} + // Major returns the major release number func (v *Version) Major() uint { return v.components[0] diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go index 32e8688ca0f0..231d4c384239 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go @@ -38,10 +38,10 @@ func PollUntilContextCancel(ctx context.Context, interval time.Duration, immedia // a deadline and is equivalent to: // // deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) -// err := PollUntilContextCancel(ctx, interval, immediate, condition) +// err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition) // // The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying -// inline usage. All other behavior is identical to PollWithContextTimeout. +// inline usage. All other behavior is identical to PollUntilContextCancel. func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error { deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) defer deadlineCancel() @@ -59,7 +59,7 @@ func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duratio // // If you want to Poll something forever, see PollInfinite. 
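Editor's note: the wait/poll.go hunks in this region only correct the function names in the deprecation notices (PollWithContextTimeout and PollWithContextCancel were never exported; the actual replacements are PollUntilContextTimeout and PollUntilContextCancel). A minimal migration sketch for the common Poll/PollImmediate cases, showing the context-based error semantics the comments warn about:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    // Before: wait.Poll(time.Second, 5*time.Second, func() (bool, error) {...})
    // After: the context carries the timeout; immediate=false mirrors Poll,
    // immediate=true mirrors PollImmediate.
    err := wait.PollUntilContextTimeout(context.Background(), time.Second, 5*time.Second, false,
        func(ctx context.Context) (done bool, err error) {
            return false, nil // condition never satisfied in this sketch
        })

    // The new helpers surface context errors instead of wait.ErrWaitTimeout.
    fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
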
// -// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func Poll(interval, timeout time.Duration, condition ConditionFunc) error { @@ -78,7 +78,7 @@ func Poll(interval, timeout time.Duration, condition ConditionFunc) error { // // If you want to Poll something forever, see PollInfinite. // -// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { @@ -91,7 +91,7 @@ func PollWithContext(ctx context.Context, interval, timeout time.Duration, condi // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { @@ -104,7 +104,7 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st // PollUntilWithContext always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { @@ -118,7 +118,7 @@ func PollUntilWithContext(ctx context.Context, interval time.Duration, condition // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollInfinite(interval time.Duration, condition ConditionFunc) error { @@ -132,7 +132,7 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error { // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. 
// Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { @@ -150,7 +150,7 @@ func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condit // // If you want to immediately Poll something forever, see PollImmediateInfinite. // -// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { @@ -168,7 +168,7 @@ func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) err // // If you want to immediately Poll something forever, see PollImmediateInfinite. // -// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { @@ -180,7 +180,7 @@ func PollImmediateWithContext(ctx context.Context, interval, timeout time.Durati // PollImmediateUntil runs the 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { @@ -193,7 +193,7 @@ func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh // PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { @@ -207,7 +207,7 @@ func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. 
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { @@ -222,7 +222,7 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. // Note that the new method will no longer return ErrWaitTimeout and instead return errors // defined by the context package. Will be removed in a future release. func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go index bb122de5fafe..745e46aa9c58 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go @@ -18,12 +18,13 @@ package cel import ( "fmt" - celconfig "k8s.io/apiserver/pkg/apis/cel" - "sync" "github.com/google/cel-go/cel" + "k8s.io/apimachinery/pkg/util/version" + celconfig "k8s.io/apiserver/pkg/apis/cel" apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/cel/library" ) @@ -36,104 +37,6 @@ const ( RequestResourceAuthorizerVarName = "authorizer.requestResource" ) -var ( - initEnvsOnce sync.Once - initEnvs envs - initEnvsErr error -) - -func getEnvs() (envs, error) { - initEnvsOnce.Do(func() { - requiredVarsEnv, err := buildRequiredVarsEnv() - if err != nil { - initEnvsErr = err - return - } - - initEnvs, err = buildWithOptionalVarsEnvs(requiredVarsEnv) - if err != nil { - initEnvsErr = err - return - } - }) - return initEnvs, initEnvsErr -} - -// This is a similar code as in k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go -// If any changes are made here, consider to make the same changes there as well. -func buildBaseEnv() (*cel.Env, error) { - var opts []cel.EnvOption - opts = append(opts, cel.HomogeneousAggregateLiterals()) - // Validate function declarations once during base env initialization, - // so they don't need to be evaluated each time a CEL rule is compiled. - // This is a relatively expensive operation. - opts = append(opts, cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true)) - opts = append(opts, library.ExtensionLibs...) - - return cel.NewEnv(opts...) -} - -func buildRequiredVarsEnv() (*cel.Env, error) { - baseEnv, err := buildBaseEnv() - if err != nil { - return nil, err - } - var propDecls []cel.EnvOption - reg := apiservercel.NewRegistry(baseEnv) - - requestType := BuildRequestType() - rt, err := apiservercel.NewRuleTypes(requestType.TypeName(), requestType, reg) - if err != nil { - return nil, err - } - if rt == nil { - return nil, nil - } - opts, err := rt.EnvOptions(baseEnv.TypeProvider()) - if err != nil { - return nil, err - } - propDecls = append(propDecls, cel.Variable(ObjectVarName, cel.DynType)) - propDecls = append(propDecls, cel.Variable(OldObjectVarName, cel.DynType)) - propDecls = append(propDecls, cel.Variable(RequestVarName, requestType.CelType())) - - opts = append(opts, propDecls...) - env, err := baseEnv.Extend(opts...) 
- if err != nil { - return nil, err - } - return env, nil -} - -type envs map[OptionalVariableDeclarations]*cel.Env - -func buildEnvWithVars(baseVarsEnv *cel.Env, options OptionalVariableDeclarations) (*cel.Env, error) { - var opts []cel.EnvOption - if options.HasParams { - opts = append(opts, cel.Variable(ParamsVarName, cel.DynType)) - } - if options.HasAuthorizer { - opts = append(opts, cel.Variable(AuthorizerVarName, library.AuthorizerType)) - opts = append(opts, cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType)) - } - return baseVarsEnv.Extend(opts...) -} - -func buildWithOptionalVarsEnvs(requiredVarsEnv *cel.Env) (envs, error) { - envs := make(envs, 4) // since the number of variable combinations is small, pre-build a environment for each - for _, hasParams := range []bool{false, true} { - for _, hasAuthorizer := range []bool{false, true} { - opts := OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer} - env, err := buildEnvWithVars(requiredVarsEnv, opts) - if err != nil { - return nil, err - } - envs[opts] = env - } - } - return envs, nil -} - // BuildRequestType generates a DeclType for AdmissionRequest. This may be replaced with a utility that // converts the native type definition to apiservercel.DeclType once such a utility becomes available. // The 'uid' field is omitted since it is not needed for in-process admission review. @@ -188,40 +91,43 @@ type CompilationResult struct { ExpressionAccessor ExpressionAccessor } +// Compiler provides a CEL expression compiler configured with the desired admission related CEL variables and +// environment mode. +type Compiler interface { + CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult +} + +type compiler struct { + varEnvs variableDeclEnvs +} + +func NewCompiler(env *environment.EnvSet) Compiler { + return &compiler{varEnvs: mustBuildEnvs(env)} +} + +type variableDeclEnvs map[OptionalVariableDeclarations]*environment.EnvSet + // CompileCELExpression returns a compiled CEL expression. // perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input. 
-func CompileCELExpression(expressionAccessor ExpressionAccessor, optionalVars OptionalVariableDeclarations, perCallLimit uint64) CompilationResult { - var env *cel.Env - envs, err := getEnvs() - if err != nil { +func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, envType environment.Type) CompilationResult { + resultError := func(errorString string, errType apiservercel.ErrorType) CompilationResult { return CompilationResult{ Error: &apiservercel.Error{ - Type: apiservercel.ErrorTypeInternal, - Detail: "compiler initialization failed: " + err.Error(), + Type: errType, + Detail: errorString, }, ExpressionAccessor: expressionAccessor, } } - env, ok := envs[optionalVars] - if !ok { - return CompilationResult{ - Error: &apiservercel.Error{ - Type: apiservercel.ErrorTypeInvalid, - Detail: fmt.Sprintf("compiler initialization failed: failed to load environment for %v", optionalVars), - }, - ExpressionAccessor: expressionAccessor, - } + + env, err := c.varEnvs[options].Env(envType) + if err != nil { + return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) } ast, issues := env.Compile(expressionAccessor.GetExpression()) if issues != nil { - return CompilationResult{ - Error: &apiservercel.Error{ - Type: apiservercel.ErrorTypeInvalid, - Detail: "compilation failed: " + issues.String(), - }, - ExpressionAccessor: expressionAccessor, - } + return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) } found := false returnTypes := expressionAccessor.ReturnTypes() @@ -239,43 +145,61 @@ func CompileCELExpression(expressionAccessor ExpressionAccessor, optionalVars Op reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) } - return CompilationResult{ - Error: &apiservercel.Error{ - Type: apiservercel.ErrorTypeInvalid, - Detail: reason, - }, - ExpressionAccessor: expressionAccessor, - } + return resultError(reason, apiservercel.ErrorTypeInvalid) } _, err = cel.AstToCheckedExpr(ast) if err != nil { // should be impossible since env.Compile returned no issues - return CompilationResult{ - Error: &apiservercel.Error{ - Type: apiservercel.ErrorTypeInternal, - Detail: "unexpected compilation error: " + err.Error(), - }, - ExpressionAccessor: expressionAccessor, - } + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) } prog, err := env.Program(ast, - cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost), - cel.OptimizeRegex(library.ExtensionLibRegexOptimizations...), cel.InterruptCheckFrequency(celconfig.CheckFrequency), - cel.CostLimit(perCallLimit), ) if err != nil { - return CompilationResult{ - Error: &apiservercel.Error{ - Type: apiservercel.ErrorTypeInvalid, - Detail: "program instantiation failed: " + err.Error(), - }, - ExpressionAccessor: expressionAccessor, - } + return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) } return CompilationResult{ Program: prog, ExpressionAccessor: expressionAccessor, } } + +func mustBuildEnvs(baseEnv *environment.EnvSet) variableDeclEnvs { + requestType := BuildRequestType() + envs := make(variableDeclEnvs, 4) // since the number of variable combinations is small, pre-build a environment for each + for _, hasParams := range []bool{false, true} { + for _, hasAuthorizer := range []bool{false, true} { + var envOpts []cel.EnvOption + if hasParams { + envOpts = append(envOpts, cel.Variable(ParamsVarName, cel.DynType)) + } + if 
hasAuthorizer { + envOpts = append(envOpts, + cel.Variable(AuthorizerVarName, library.AuthorizerType), + cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType)) + } + envOpts = append(envOpts, + cel.Variable(ObjectVarName, cel.DynType), + cel.Variable(OldObjectVarName, cel.DynType), + cel.Variable(RequestVarName, requestType.CelType())) + + extended, err := baseEnv.Extend( + environment.VersionedOptions{ + // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these + // options should always be present. + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: envOpts, + DeclTypes: []*apiservercel.DeclType{ + requestType, + }, + }, + ) + if err != nil { + panic(fmt.Sprintf("environment misconfigured: %v", err)) + } + envs[OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer}] = extended + } + } + return envs +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go index 6e504897c5a4..9aa10bd6b264 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go @@ -32,15 +32,17 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/cel/library" ) // filterCompiler implement the interface FilterCompiler. type filterCompiler struct { + compiler Compiler } -func NewFilterCompiler() FilterCompiler { - return &filterCompiler{} +func NewFilterCompiler(env *environment.EnvSet) FilterCompiler { + return &filterCompiler{compiler: NewCompiler(env)} } type evaluationActivation struct { @@ -75,13 +77,13 @@ func (a *evaluationActivation) Parent() interpreter.Activation { } // Compile compiles the cel expressions defined in the ExpressionAccessors into a Filter -func (c *filterCompiler) Compile(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, perCallLimit uint64) Filter { +func (c *filterCompiler) Compile(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) Filter { compilationResults := make([]CompilationResult, len(expressionAccessors)) for i, expressionAccessor := range expressionAccessors { if expressionAccessor == nil { continue } - compilationResults[i] = CompileCELExpression(expressionAccessor, options, perCallLimit) + compilationResults[i] = c.compiler.CompileCELExpression(expressionAccessor, options, mode) } return NewFilter(compilationResults) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go index d3c4a0217d13..50782d6f0447 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/cel/environment" ) type ExpressionAccessor interface { @@ -57,8 +58,7 @@ type OptionalVariableDeclarations struct { // FilterCompiler contains a function to assist with converting types and values to/from CEL-typed values. 
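Editor's note: with the package-level env cache removed, callers of this package now construct a Compiler explicitly from a base EnvSet and choose the environment type per expression source. A sketch of that flow under stated assumptions: fakeAccessor is a hypothetical ExpressionAccessor implementation for illustration only (real callers pass types such as matchconditions.MatchCondition).

package main

import (
    "fmt"

    celgo "github.com/google/cel-go/cel"

    plugincel "k8s.io/apiserver/pkg/admission/plugin/cel"
    "k8s.io/apiserver/pkg/cel/environment"
)

// fakeAccessor satisfies ExpressionAccessor (GetExpression/ReturnTypes).
type fakeAccessor struct{ expr string }

func (f *fakeAccessor) GetExpression() string      { return f.expr }
func (f *fakeAccessor) ReturnTypes() []*celgo.Type { return []*celgo.Type{celgo.BoolType} }

func main() {
    // One base EnvSet per process, one Compiler wrapping it.
    base := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())
    compiler := plugincel.NewCompiler(base)

    result := compiler.CompileCELExpression(
        &fakeAccessor{expr: "has(object.spec)"}, // "object" is declared by mustBuildEnvs above
        plugincel.OptionalVariableDeclarations{HasParams: false, HasAuthorizer: false},
        environment.NewExpressions, // validating an expression arriving in an API write
    )
    fmt.Println(result.Error == nil) // true: compiled against the compatibility-version env
}
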
type FilterCompiler interface { // Compile is used for the cel expression compilation - // perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input. - Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, perCallLimit uint64) Filter + Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter } // OptionalVariableBindings provides expression bindings for optional CEL variables. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go index f54f1acb36f5..3f6af530530b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go @@ -43,6 +43,7 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matching" celconfig "k8s.io/apiserver/pkg/apis/cel" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/cel/openapi/resolver" "k8s.io/apiserver/pkg/warning" "k8s.io/client-go/dynamic" @@ -140,7 +141,7 @@ func NewAdmissionController( client, dynamicClient, typeChecker, - cel.NewFilterCompiler(), + cel.NewFilterCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())), NewMatcher(matching.NewMatcher(informerFactory.Core().V1().Namespaces().Lister(), client)), generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicy]( informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()), @@ -336,12 +337,6 @@ func (c *celAdmissionController) Validate( } validationResult := bindingInfo.validator.Validate(ctx, versionedAttr, param, celconfig.RuntimeCELCostBudget) - if err != nil { - // runtime error. 
Apply failure policy - wrappedError := fmt.Errorf("failed to evaluate CEL expression: %w", err) - addConfigError(wrappedError, definition, binding) - continue - } for i, decision := range validationResult.Decisions { switch decision.Action { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index 296ac416aa28..f1c910202527 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -36,8 +36,8 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/cel" "k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic" "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" - celconfig "k8s.io/apiserver/pkg/apis/cel" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/cel/environment" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/informers" @@ -512,13 +512,13 @@ func (c *policyController) latestPolicyData() []policyData { for i := range matchConditions { matchExpressionAccessors[i] = (*matchconditions.MatchCondition)(&matchConditions[i]) } - matcher = matchconditions.NewMatcher(c.filterCompiler.Compile(matchExpressionAccessors, optionalVars, celconfig.PerCallLimit), c.authz, failurePolicy, "validatingadmissionpolicy", definitionInfo.lastReconciledValue.Name) + matcher = matchconditions.NewMatcher(c.filterCompiler.Compile(matchExpressionAccessors, optionalVars, environment.StoredExpressions), c.authz, failurePolicy, "validatingadmissionpolicy", definitionInfo.lastReconciledValue.Name) } bindingInfo.validator = c.newValidator( - c.filterCompiler.Compile(convertv1alpha1Validations(definitionInfo.lastReconciledValue.Spec.Validations), optionalVars, celconfig.PerCallLimit), + c.filterCompiler.Compile(convertv1alpha1Validations(definitionInfo.lastReconciledValue.Spec.Validations), optionalVars, environment.StoredExpressions), matcher, - c.filterCompiler.Compile(convertv1alpha1AuditAnnotations(definitionInfo.lastReconciledValue.Spec.AuditAnnotations), optionalVars, celconfig.PerCallLimit), - c.filterCompiler.Compile(convertV1Alpha1MessageExpressions(definitionInfo.lastReconciledValue.Spec.Validations), expressionOptionalVars, celconfig.PerCallLimit), + c.filterCompiler.Compile(convertv1alpha1AuditAnnotations(definitionInfo.lastReconciledValue.Spec.AuditAnnotations), optionalVars, environment.StoredExpressions), + c.filterCompiler.Compile(convertV1Alpha1MessageExpressions(definitionInfo.lastReconciledValue.Spec.Validations), expressionOptionalVars, environment.StoredExpressions), failurePolicy, c.authz, ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go index 7b128e38185c..12ba33edf799 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go @@ -21,20 +21,20 @@ import ( "fmt" "sort" "strings" - "sync" + "time" "github.com/google/cel-go/cel" - 
"github.com/google/cel-go/common/types/ref" "k8s.io/api/admissionregistration/v1alpha1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/version" plugincel "k8s.io/apiserver/pkg/admission/plugin/cel" apiservercel "k8s.io/apiserver/pkg/cel" "k8s.io/apiserver/pkg/cel/common" - "k8s.io/apiserver/pkg/cel/library" + "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/cel/openapi" "k8s.io/apiserver/pkg/cel/openapi/resolver" "k8s.io/klog/v2" @@ -128,7 +128,7 @@ func (c *TypeChecker) CheckExpressions(expressions []string, hasParams bool, pol for i, gvk := range gvks { s := schemas[i] issues, err := c.checkExpression(exp, hasParams, typeOverwrite{ - object: common.SchemaDeclType(s, true), + object: common.SchemaDeclType(s, true).MaybeAssignTypeName(generateUniqueTypeName(gvk.Kind)), params: paramsDeclType, }) // save even if no issues are found, for the sake of formatting. @@ -144,6 +144,10 @@ func (c *TypeChecker) CheckExpressions(expressions []string, hasParams bool, pol return allWarnings } +func generateUniqueTypeName(kind string) string { + return fmt.Sprintf("%s%d", kind, time.Now().Nanosecond()) +} + // formatWarning converts the resulting issues and possible error during // type checking into a human-readable string func (c *TypeChecker) formatWarning(results []typeCheckingResult) string { @@ -169,7 +173,7 @@ func (c *TypeChecker) declType(gvk schema.GroupVersionKind) (*apiservercel.DeclT if err != nil { return nil, err } - return common.SchemaDeclType(&openapi.Schema{Schema: s}, true), nil + return common.SchemaDeclType(&openapi.Schema{Schema: s}, true).MaybeAssignTypeName(generateUniqueTypeName(gvk.Kind)), nil } func (c *TypeChecker) paramsType(policy *v1alpha1.ValidatingAdmissionPolicy) schema.GroupVersionKind { @@ -314,122 +318,51 @@ func sortGVKList(list []schema.GroupVersionKind) []schema.GroupVersionKind { } func buildEnv(hasParams bool, types typeOverwrite) (*cel.Env, error) { - baseEnv, err := getBaseEnv() - if err != nil { - return nil, err - } - reg := apiservercel.NewRegistry(baseEnv) + baseEnv := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()) requestType := plugincel.BuildRequestType() var varOpts []cel.EnvOption - var rts []*apiservercel.RuleTypes + var declTypes []*apiservercel.DeclType // request, hand-crafted type - rt, opts, err := createRuleTypesAndOptions(reg, requestType, plugincel.RequestVarName) - if err != nil { - return nil, err - } - rts = append(rts, rt) - varOpts = append(varOpts, opts...) + declTypes = append(declTypes, requestType) + varOpts = append(varOpts, createVariableOpts(requestType, plugincel.RequestVarName)...) // object and oldObject, same type, type(s) resolved from constraints - rt, opts, err = createRuleTypesAndOptions(reg, types.object, plugincel.ObjectVarName, plugincel.OldObjectVarName) - if err != nil { - return nil, err - } - rts = append(rts, rt) - varOpts = append(varOpts, opts...) + declTypes = append(declTypes, types.object) + varOpts = append(varOpts, createVariableOpts(types.object, plugincel.ObjectVarName, plugincel.OldObjectVarName)...) // params, defined by ParamKind - if hasParams { - rt, opts, err := createRuleTypesAndOptions(reg, types.params, plugincel.ParamsVarName) - if err != nil { - return nil, err - } - rts = append(rts, rt) - varOpts = append(varOpts, opts...) 
- } - - opts, err = ruleTypesOpts(rts, baseEnv.TypeProvider()) - if err != nil { - return nil, err + if hasParams && types.params != nil { + declTypes = append(declTypes, types.params) + varOpts = append(varOpts, createVariableOpts(types.params, plugincel.ParamsVarName)...) } - opts = append(opts, varOpts...) // add variables after ruleTypes. - env, err := baseEnv.Extend(opts...) + env, err := baseEnv.Extend( + environment.VersionedOptions{ + // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these + // options should always be present. + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: varOpts, + DeclTypes: declTypes, + }, + ) if err != nil { return nil, err } - return env, nil + return env.Env(environment.StoredExpressions) } -// createRuleTypeAndOptions creates the cel RuleTypes and a slice of EnvOption +// createVariableOpts creates a slice of EnvOption // that can be used for creating a CEL env containing variables of declType. // declType can be nil, in which case the variables will be of DynType. -func createRuleTypesAndOptions(registry *apiservercel.Registry, declType *apiservercel.DeclType, variables ...string) (*apiservercel.RuleTypes, []cel.EnvOption, error) { +func createVariableOpts(declType *apiservercel.DeclType, variables ...string) []cel.EnvOption { opts := make([]cel.EnvOption, 0, len(variables)) - // untyped, use DynType - if declType == nil { - for _, v := range variables { - opts = append(opts, cel.Variable(v, cel.DynType)) - } - return nil, opts, nil - } - // create a RuleType for the given type - rt, err := apiservercel.NewRuleTypes(declType.TypeName(), declType, registry) - if err != nil { - return nil, nil, err - } - if rt == nil { - return nil, nil, nil + t := cel.DynType + if declType != nil { + t = declType.CelType() } for _, v := range variables { - opts = append(opts, cel.Variable(v, declType.CelType())) - } - return rt, opts, nil -} - -func ruleTypesOpts(ruleTypes []*apiservercel.RuleTypes, underlyingTypeProvider ref.TypeProvider) ([]cel.EnvOption, error) { - var providers []ref.TypeProvider // may be unused, too small to matter - var adapters []ref.TypeAdapter - for _, rt := range ruleTypes { - if rt != nil { - withTP, err := rt.WithTypeProvider(underlyingTypeProvider) - if err != nil { - return nil, err - } - providers = append(providers, withTP) - adapters = append(adapters, withTP) - } - } - var tp ref.TypeProvider - var ta ref.TypeAdapter - switch len(providers) { - case 0: - return nil, nil - case 1: - tp = providers[0] - ta = adapters[0] - default: - tp = &apiservercel.CompositedTypeProvider{Providers: providers} - ta = &apiservercel.CompositedTypeAdapter{Adapters: adapters} + opts = append(opts, cel.Variable(v, t)) } - return []cel.EnvOption{cel.CustomTypeProvider(tp), cel.CustomTypeAdapter(ta)}, nil + return opts } - -func getBaseEnv() (*cel.Env, error) { - typeCheckingBaseEnvInit.Do(func() { - var opts []cel.EnvOption - opts = append(opts, cel.HomogeneousAggregateLiterals()) - // Validate function declarations once during base env initialization, - // so they don't need to be evaluated each time a CEL rule is compiled. - // This is a relatively expensive operation. - opts = append(opts, cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true)) - opts = append(opts, library.ExtensionLibs...) - typeCheckingBaseEnv, typeCheckingBaseEnvError = cel.NewEnv(opts...) 
- }) - return typeCheckingBaseEnv, typeCheckingBaseEnvError -} - -var typeCheckingBaseEnv *cel.Env -var typeCheckingBaseEnvError error -var typeCheckingBaseEnvInit sync.Once diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go index 102597cbcc04..b95af270d7a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go @@ -26,8 +26,8 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace" "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" - celconfig "k8s.io/apiserver/pkg/apis/cel" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/cel/environment" webhookutil "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/rest" ) @@ -140,7 +140,7 @@ func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler HasParams: false, HasAuthorizer: true, }, - celconfig.PerCallLimit, + environment.StoredExpressions, ), authorizer, m.FailurePolicy, "validating", m.Name) }) return m.compiledMatcher @@ -268,7 +268,7 @@ func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompil HasParams: false, HasAuthorizer: true, }, - celconfig.PerCallLimit, + environment.StoredExpressions, ), authorizer, v.FailurePolicy, "validating", v.Name) }) return v.compiledMatcher diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go index a5828983112b..ad5ec18e24f3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go @@ -35,6 +35,7 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/cel/environment" webhookutil "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" @@ -97,7 +98,7 @@ func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory namespaceMatcher: &namespace.Matcher{}, objectMatcher: &object.Matcher{}, dispatcher: dispatcherFactory(&cm), - filterCompiler: cel.NewFilterCompiler(), + filterCompiler: cel.NewFilterCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())), }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go index e6d7b99757e6..d9034a80fb2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go @@ -26,9 +26,10 @@ import ( "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "k8s.io/kube-openapi/pkg/validation/strfmt" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apiserver/pkg/cel" - "k8s.io/kube-openapi/pkg/validation/strfmt" ) // UnstructuredToVal converts a Kubernetes unstructured data element to a CEL Val. 
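Editor's note: both webhook call sites above now pass environment.StoredExpressions because match conditions read back from persisted webhook configurations must keep compiling across version skew; only expressions arriving in new API writes get the stricter NewExpressions treatment. A sketch of that selection rule, assuming only the EnvSet API introduced later in this diff (envTypeFor is a hypothetical helper):

package main

import (
    "fmt"

    "k8s.io/apiserver/pkg/cel/environment"
)

// envTypeFor picks the CEL environment type per the convention used above:
// expressions being written through the API are validated against the
// compatibility-version environment, while expressions already persisted
// use the most permissive environment so they keep compiling after upgrades.
func envTypeFor(persisted bool) environment.Type {
    if persisted {
        return environment.StoredExpressions
    }
    return environment.NewExpressions
}

func main() {
    base := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())
    env, err := base.Env(envTypeFor(true))
    if err != nil {
        panic(err)
    }
    fmt.Println(env != nil) // true: ready to compile stored expressions
}
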
@@ -425,7 +426,22 @@ var _ = traits.Lister(&unstructuredList{}) func (t *unstructuredList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { switch typeDesc.Kind() { case reflect.Slice: - return t.elements, nil + switch t.itemsSchema.Type() { + // Workaround for https://github.com/kubernetes/kubernetes/issues/117590 until we + // resolve the desired behavior in cel-go via https://github.com/google/cel-go/issues/688 + case "string": + var result []string + for _, e := range t.elements { + s, ok := e.(string) + if !ok { + return nil, fmt.Errorf("expected all elements to be of type string, but got %T", e) + } + result = append(result, s) + } + return result, nil + default: + return t.elements, nil + } } return nil, fmt.Errorf("type conversion error from '%s' to '%s'", t.Type(), typeDesc) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/composited.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/composited.go deleted file mode 100644 index 9e5e634d0c34..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/composited.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "github.com/google/cel-go/common/types/ref" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -var _ ref.TypeProvider = (*CompositedTypeProvider)(nil) -var _ ref.TypeAdapter = (*CompositedTypeAdapter)(nil) - -// CompositedTypeProvider is the provider that tries each of the underlying -// providers in order, and returns result of the first successful attempt. -type CompositedTypeProvider struct { - // Providers contains the underlying type providers. - // If Providers is empty, the CompositedTypeProvider becomes no-op provider. - Providers []ref.TypeProvider -} - -// EnumValue finds out the numeric value of the given enum name. -// The result comes from first provider that returns non-nil. -func (c *CompositedTypeProvider) EnumValue(enumName string) ref.Val { - for _, p := range c.Providers { - val := p.EnumValue(enumName) - if val != nil { - return val - } - } - return nil -} - -// FindIdent takes a qualified identifier name and returns a Value if one -// exists. The result comes from first provider that returns non-nil. -func (c *CompositedTypeProvider) FindIdent(identName string) (ref.Val, bool) { - for _, p := range c.Providers { - val, ok := p.FindIdent(identName) - if ok { - return val, ok - } - } - return nil, false -} - -// FindType finds the Type given a qualified type name, or return false -// if none of the providers finds the type. -// If any of the providers find the type, the first provider that returns true -// will be the result. -func (c *CompositedTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { - for _, p := range c.Providers { - typ, ok := p.FindType(typeName) - if ok { - return typ, ok - } - } - return nil, false -} - -// FindFieldType returns the field type for a checked type value. 
Returns -// false if none of the providers can find the type. -// If multiple providers can find the field, the result is taken from -// the first that does. -func (c *CompositedTypeProvider) FindFieldType(messageType string, fieldName string) (*ref.FieldType, bool) { - for _, p := range c.Providers { - ft, ok := p.FindFieldType(messageType, fieldName) - if ok { - return ft, ok - } - } - return nil, false -} - -// NewValue creates a new type value from a qualified name and map of field -// name to value. -// If multiple providers can create the new type, the first that returns -// non-nil will decide the result. -func (c *CompositedTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val { - for _, p := range c.Providers { - v := p.NewValue(typeName, fields) - if v != nil { - return v - } - } - return nil -} - -// CompositedTypeAdapter is the adapter that tries each of the underlying -// type adapter in order until the first successfully conversion. -type CompositedTypeAdapter struct { - // Adapters contains underlying type adapters. - // If Adapters is empty, the CompositedTypeAdapter becomes a no-op adapter. - Adapters []ref.TypeAdapter -} - -// NativeToValue takes the value and convert it into a ref.Val -// The result comes from the first TypeAdapter that returns non-nil. -func (c *CompositedTypeAdapter) NativeToValue(value interface{}) ref.Val { - for _, a := range c.Adapters { - v := a.NativeToValue(value) - if v != nil { - return v - } - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/environment/base.go new file mode 100644 index 000000000000..641e5857be0a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -0,0 +1,118 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package environment + +import ( + "fmt" + "strconv" + "sync" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/ext" + "golang.org/x/sync/singleflight" + + "k8s.io/apimachinery/pkg/util/version" + celconfig "k8s.io/apiserver/pkg/apis/cel" + "k8s.io/apiserver/pkg/cel/library" +) + +// DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet +// that guarantees compatibility with CEL features/libraries/parameters understood by +// an n-1 version +// +// This default will be set to no more than n-1 the current Kubernetes major.minor version. +// +// Note that a default version number less than n-1 indicates a wider range of version +// compatibility than strictly required for rollback. A wide range of compatibility is +// desirable because it means that CEL expressions are portable across a wider range +// of Kubernetes versions. 
+func DefaultCompatibilityVersion() *version.Version {
+	return version.MajorMinor(1, 27)
+}
+
+var baseOpts = []VersionedOptions{
+	{
+		// CEL epoch was actually 1.23, but we artificially set it to 1.0 because these
+		// options should always be present.
+		IntroducedVersion: version.MajorMinor(1, 0),
+		EnvOptions: []cel.EnvOption{
+			cel.HomogeneousAggregateLiterals(),
+			// Validate function declarations once during base env initialization,
+			// so they don't need to be evaluated each time a CEL rule is compiled.
+			// This is a relatively expensive operation.
+			cel.EagerlyValidateDeclarations(true),
+			cel.DefaultUTCTimeZone(true),
+
+			ext.Strings(ext.StringsVersion(0)),
+			library.URLs(),
+			library.Regex(),
+			library.Lists(),
+		},
+		ProgramOptions: []cel.ProgramOption{
+			cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost),
+			cel.CostLimit(celconfig.PerCallLimit),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 27),
+		EnvOptions: []cel.EnvOption{
+			library.Authz(),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 28),
+		EnvOptions: []cel.EnvOption{
+			cel.CrossTypeNumericComparisons(true),
+			cel.OptionalTypes(),
+		},
+	},
+	// TODO: switch to ext.Strings version 2 once format() is fixed to work with HomogeneousAggregateLiterals.
+}
+
+// MustBaseEnvSet returns the common CEL base environments for Kubernetes for the given version, or panics
+// if the version is nil, or does not have major and minor components.
+//
+// The returned environment contains function libraries, language settings, optimizations and
+// runtime cost limits appropriate for CEL as it is used in Kubernetes.
+//
+// The returned environment contains no CEL variable definitions or custom type declarations and
+// should be extended to construct environments with the appropriate variable definitions,
+// type declarations and any other needed configuration.
+func MustBaseEnvSet(ver *version.Version) *EnvSet {
+	if ver == nil {
+		panic("version must be non-nil")
+	}
+	if len(ver.Components()) < 2 {
+		panic(fmt.Sprintf("version must contain a major and minor component, but got: %s", ver.String()))
+	}
+	key := strconv.FormatUint(uint64(ver.Major()), 10) + "." + strconv.FormatUint(uint64(ver.Minor()), 10)
+	if entry, ok := baseEnvs.Load(key); ok {
+		return entry.(*EnvSet)
+	}
+
+	entry, _, _ := baseEnvsSingleflight.Do(key, func() (interface{}, error) {
+		entry := mustNewEnvSet(ver, baseOpts)
+		baseEnvs.Store(key, entry)
+		return entry, nil
+	})
+	return entry.(*EnvSet)
+}
+
+var (
+	baseEnvs             = sync.Map{}
+	baseEnvsSingleflight = &singleflight.Group{}
+)
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
new file mode 100644
index 000000000000..b47bc8e984be
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
@@ -0,0 +1,274 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package environment
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/google/cel-go/cel"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Type defines the different types of CEL environments used in Kubernetes.
+// CEL environments are used to compile and evaluate CEL expressions.
+// Environments include:
+//   - Function libraries
+//   - Variables
+//   - Types (both core CEL types and Kubernetes types)
+//   - Other CEL environment and program options
+type Type string
+
+const (
+	// NewExpressions is used to validate new or modified expressions in
+	// requests that write expressions to API resources.
+	//
+	// This environment type is compatible with a specific Kubernetes
+	// major/minor version. To ensure safe rollback, this environment type
+	// may not include all the function libraries, variables, type declarations, and CEL
+	// language settings available in the StoredExpressions environment type.
+	//
+	// NewExpressions must be used to validate (parse, compile, type check)
+	// all new or modified CEL expressions before they are written to storage.
+	NewExpressions Type = "NewExpressions"
+
+	// StoredExpressions is used to compile and run CEL expressions that have been
+	// persisted to storage.
+	//
+	// This environment type is compatible with CEL expressions that have been
+	// persisted to storage by all known versions of Kubernetes. This is the most
+	// permissive environment available.
+	//
+	// StoredExpressions is appropriate for use with CEL expressions in
+	// configuration files.
+	StoredExpressions Type = "StoredExpressions"
+)
+
+// EnvSet manages the creation and extension of CEL environments. Each EnvSet contains
+// both a NewExpressions and a StoredExpressions environment. EnvSets are created
+// and extended using VersionedOptions so that the EnvSet can prepare environments according
+// to what options were introduced at which versions.
+//
+// Each EnvSet is given a compatibility version when it is created, and prepares the
+// NewExpressions environment to be compatible with that version. The EnvSet also
+// prepares StoredExpressions to be compatible with all known versions of Kubernetes.
+type EnvSet struct {
+	// compatibilityVersion is the version that all configuration in
+	// the NewExpressions environment is compatible with.
+	compatibilityVersion *version.Version
+
+	// newExpressions is an environment containing only configuration
+	// in this EnvSet that is enabled at this compatibilityVersion.
+	newExpressions *cel.Env
+
+	// storedExpressions is an environment containing the latest configuration
+	// in this EnvSet.
+	storedExpressions *cel.Env
+}
+
+func newEnvSet(compatibilityVersion *version.Version, opts []VersionedOptions) (*EnvSet, error) {
+	base, err := cel.NewEnv()
+	if err != nil {
+		return nil, err
+	}
+	baseSet := EnvSet{compatibilityVersion: compatibilityVersion, newExpressions: base, storedExpressions: base}
+	return baseSet.Extend(opts...)
+}
+
+func mustNewEnvSet(ver *version.Version, opts []VersionedOptions) *EnvSet {
+	envSet, err := newEnvSet(ver, opts)
+	if err != nil {
+		panic(fmt.Sprintf("Default environment misconfigured: %v", err))
+	}
+	return envSet
+}
+
+// NewExpressionsEnv returns the NewExpressions environment Type for this EnvSet.
+// See NewExpressions for details.
+func (e *EnvSet) NewExpressionsEnv() *cel.Env {
+	return e.newExpressions
+}
+
+// StoredExpressionsEnv returns the StoredExpressions environment Type for this EnvSet.
+// See StoredExpressions for details.
+func (e *EnvSet) StoredExpressionsEnv() *cel.Env {
+	return e.storedExpressions
+}
+
+// Env returns the CEL environment for the given Type.
+func (e *EnvSet) Env(envType Type) (*cel.Env, error) {
+	switch envType {
+	case NewExpressions:
+		return e.newExpressions, nil
+	case StoredExpressions:
+		return e.storedExpressions, nil
+	default:
+		return nil, fmt.Errorf("unsupported environment type: %v", envType)
+	}
+}
+
+// VersionedOptions provides a set of CEL configuration options as well as the version at which
+// the options were introduced and, optionally, the version at which they were removed.
+type VersionedOptions struct {
+	// IntroducedVersion is the version at which these options were introduced.
+	// The NewExpressions environment will only include options introduced at or before the
+	// compatibility version of the EnvSet.
+	//
+	// For example, to configure a CEL environment with an "object" variable bound to a
+	// resource kind, first create a DeclType from the groupVersionKind of the resource and then
+	// populate a VersionedOptions with the variable and the type:
+	//
+	//    schema := schemaResolver.ResolveSchema(groupVersionKind)
+	//    objectType := apiservercel.SchemaDeclType(schema, true)
+	//    ...
+	//    VersionedOptions{
+	//      IntroducedVersion: version.MajorMinor(1, 26),
+	//      DeclTypes: []*apiservercel.DeclType{ objectType },
+	//      EnvOptions: []cel.EnvOption{ cel.Variable("object", objectType.CelType()) },
+	//    },
+	//
+	// To create a DeclType from a CRD, use a structural schema. For example:
+	//
+	//    schema := structuralschema.NewStructural(crdJSONProps)
+	//    objectType := apiservercel.SchemaDeclType(schema, true)
+	//
+	// Required.
+	IntroducedVersion *version.Version
+	// RemovedVersion is the version at which these options were removed.
+	// The NewExpressions environment will not include options removed at or before the
+	// compatibility version of the EnvSet.
+	//
+	// All option removals must be backward compatible; the removal must either be paired
+	// with a compatible replacement introduced at the same version, or the removal must be non-breaking.
+	// The StoredExpressions environment will not include removed options.
+	//
+	// A function library may be upgraded by setting the RemovedVersion of the old library
+	// to the same value as the IntroducedVersion of the new library. The new library must
+	// be backward compatible with the old library.
+	//
+	// For example:
+	//
+	//    VersionedOptions{
+	//      IntroducedVersion: version.MajorMinor(1, 26), RemovedVersion: version.MajorMinor(1, 27),
+	//      EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(1)) },
+	//    },
+	//    VersionedOptions{
+	//      IntroducedVersion: version.MajorMinor(1, 27),
+	//      EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(2)) },
+	//    },
+	//
+	// Optional.
+	RemovedVersion *version.Version
+
+	// EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a
+	// cel.Library, or to enable other CEL EnvOptions such as language settings.
+	//
+	// If an added cel.Variable has an OpenAPI type, the type must be included in DeclTypes.
+	EnvOptions []cel.EnvOption
+	// ProgramOptions provides CEL ProgramOptions. This may be used to set a cel.CostLimit,
+	// enable optimizations, and set other program level options that should be enabled
+	// for all programs using this environment.
+	ProgramOptions []cel.ProgramOption
+	// DeclTypes provides OpenAPI type declarations to register with the environment.
+	//
+	// If cel.Variables added to EnvOptions refer to an OpenAPI type, the type must be included in
+	// DeclTypes.
+	DeclTypes []*apiservercel.DeclType
+}
+
+// Extend returns an EnvSet based on this EnvSet but extended with given VersionedOptions.
+// This EnvSet is not mutated.
+// The returned EnvSet has the same compatibility version as the EnvSet that was extended.
+//
+// Extend is an expensive operation and each call to Extend that adds DeclTypes increases
+// the depth of a chain of resolvers. For these reasons, calls to Extend should be kept
+// to a minimum.
+//
+// Some best practices:
+//
+//   - Minimize calls to Extend when handling API requests. Where possible, call Extend
+//     when initializing components.
+//   - If an EnvSet returned by Extend can be used to compile multiple CEL programs,
+//     call Extend once and reuse the returned EnvSet.
+//   - Prefer a single call to Extend with a full list of VersionedOptions over
+//     making multiple calls to Extend.
+func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
+	if len(options) > 0 {
+		newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, options)
+		if err != nil {
+			return nil, err
+		}
+		p, err := e.newExpressions.Extend(newExprOpts)
+		if err != nil {
+			return nil, err
+		}
+		storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), options)
+		if err != nil {
+			return nil, err
+		}
+		s, err := e.storedExpressions.Extend(storedExprOpt)
+		if err != nil {
+			return nil, err
+		}
+		return &EnvSet{compatibilityVersion: e.compatibilityVersion, newExpressions: p, storedExpressions: s}, nil
+	}
+	return e, nil
+}
+
+func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, opts []VersionedOptions) (cel.EnvOption, error) {
+	var envOpts []cel.EnvOption
+	var progOpts []cel.ProgramOption
+	var declTypes []*apiservercel.DeclType
+
+	for _, opt := range opts {
+		if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) {
+			envOpts = append(envOpts, opt.EnvOptions...)
+			progOpts = append(progOpts, opt.ProgramOptions...)
+			declTypes = append(declTypes, opt.DeclTypes...)
+		}
+	}
+
+	if len(declTypes) > 0 {
+		provider := apiservercel.NewDeclTypeProvider(declTypes...)
+		providerOpts, err := provider.EnvOptions(base.TypeProvider())
+		if err != nil {
+			return nil, err
+		}
+		envOpts = append(envOpts, providerOpts...)
+	}
+
+	combined := cel.Lib(&envLoader{
+		envOpts:  envOpts,
+		progOpts: progOpts,
+	})
+	return combined, nil
+}
+
+type envLoader struct {
+	envOpts  []cel.EnvOption
+	progOpts []cel.ProgramOption
+}
+
+func (e *envLoader) CompileOptions() []cel.EnvOption {
+	return e.envOpts
+}
+
+func (e *envLoader) ProgramOptions() []cel.ProgramOption {
+	return e.progOpts
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
deleted file mode 100644
index e2e8fc29bd16..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
deleted file mode 100644
index e2e8fc29bd16..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package library
-
-import (
-	"github.com/google/cel-go/cel"
-	"github.com/google/cel-go/ext"
-	"github.com/google/cel-go/interpreter"
-)
-
-// ExtensionLibs declares the set of CEL extension libraries available everywhere CEL is used in Kubernetes.
-var ExtensionLibs = append(k8sExtensionLibs, ext.Strings())
-
-var k8sExtensionLibs = []cel.EnvOption{
-	URLs(),
-	Regex(),
-	Lists(),
-	Authz(),
-}
-
-var ExtensionLibRegexOptimizations = []*interpreter.RegexOptimization{FindRegexOptimization, FindAllRegexOptimization}
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
index 6db5ef195756..17fb3d44c970 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
@@ -77,7 +77,9 @@ func (*regex) CompileOptions() []cel.EnvOption {
 }
 
 func (*regex) ProgramOptions() []cel.ProgramOption {
-	return []cel.ProgramOption{}
+	return []cel.ProgramOption{
+		cel.OptimizeRegex(FindRegexOptimization, FindAllRegexOptimization),
+	}
 }
 
 func find(strVal ref.Val, regexVal ref.Val) ref.Val {
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/test.go
new file mode 100644
index 000000000000..95446f63c6bd
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/library/test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"math"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Test provides a test() function that returns true.
+func Test(options ...TestOption) cel.EnvOption { + t := &testLib{version: math.MaxUint32} + for _, o := range options { + t = o(t) + } + return cel.Lib(t) +} + +type testLib struct { + version uint32 +} + +type TestOption func(*testLib) *testLib + +func TestVersion(version uint32) func(lib *testLib) *testLib { + return func(sl *testLib) *testLib { + sl.version = version + return sl + } +} + +func (t *testLib) CompileOptions() []cel.EnvOption { + var options []cel.EnvOption + + if t.version == 0 { + options = append(options, cel.Function("test", + cel.Overload("test", []*cel.Type{}, cel.BoolType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + return types.True + })))) + } + + if t.version >= 1 { + options = append(options, cel.Function("test", + cel.Overload("test", []*cel.Type{}, cel.BoolType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + // Return false here so tests can observe which version of the function is registered + // Actual function libraries must not break backward compatibility + return types.False + })))) + options = append(options, cel.Function("testV1", + cel.Overload("testV1", []*cel.Type{}, cel.BoolType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + return types.True + })))) + } + return options +} + +func (*testLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/registry.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/registry.go deleted file mode 100644 index 1aee3a127d6c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/registry.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "sync" - - "github.com/google/cel-go/cel" -) - -// Resolver declares methods to find policy templates and related configuration objects. -type Resolver interface { - // FindType returns a DeclType instance corresponding to the given fully-qualified name, if - // present. - FindType(name string) (*DeclType, bool) -} - -// NewRegistry create a registry for keeping track of environments and types -// from a base cel.Env expression environment. -func NewRegistry(stdExprEnv *cel.Env) *Registry { - return &Registry{ - exprEnvs: map[string]*cel.Env{"": stdExprEnv}, - types: map[string]*DeclType{ - BoolType.TypeName(): BoolType, - BytesType.TypeName(): BytesType, - DoubleType.TypeName(): DoubleType, - DurationType.TypeName(): DurationType, - IntType.TypeName(): IntType, - NullType.TypeName(): NullType, - StringType.TypeName(): StringType, - TimestampType.TypeName(): TimestampType, - UintType.TypeName(): UintType, - ListType.TypeName(): ListType, - MapType.TypeName(): MapType, - }, - } -} - -// Registry defines a repository of environment, schema, template, and type definitions. -// -// Registry instances are concurrency-safe. 
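The versioned `testLib` above is easiest to read next to the `VersionedOptions` introduction/removal pairing it is meant to exercise. A hedged sketch of that pairing, reusing the `environment` and `version` names from the earlier hunk; `base` is an assumed existing `*environment.EnvSet`:

```go
// Model a backward-compatible library upgrade: version 0 of the test library
// is removed at 1.27, and version 1 is introduced at 1.27 in its place.
extended, err := base.Extend(
	environment.VersionedOptions{
		IntroducedVersion: version.MajorMinor(1, 26),
		RemovedVersion:    version.MajorMinor(1, 27),
		EnvOptions:        []cel.EnvOption{library.Test(library.TestVersion(0))},
	},
	environment.VersionedOptions{
		IntroducedVersion: version.MajorMinor(1, 27),
		EnvOptions:        []cel.EnvOption{library.Test(library.TestVersion(1))},
	},
)
// At compatibility version 1.26, NewExpressions registers the version-0 test()
// (returns true); at 1.27 it registers version 1, where test() returns false
// and testV1() exists, so a test can observe which library was selected.
```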
-type Registry struct { - rwMux sync.RWMutex - exprEnvs map[string]*cel.Env - types map[string]*DeclType -} - -// FindType implements the Resolver interface method. -func (r *Registry) FindType(name string) (*DeclType, bool) { - r.rwMux.RLock() - defer r.rwMux.RUnlock() - typ, found := r.types[name] - if found { - return typ, true - } - return typ, found -} - -// SetType registers a DeclType descriptor by its fully qualified name. -func (r *Registry) SetType(name string, declType *DeclType) error { - r.rwMux.Lock() - defer r.rwMux.Unlock() - r.types[name] = declType - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/types.go index b2cc92d59eb1..bd14e1697445 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/types.go @@ -319,44 +319,53 @@ func (f *DeclField) EnumValues() []ref.Val { return ev } -// NewRuleTypes returns an Open API Schema-based type-system which is CEL compatible. -func NewRuleTypes(kind string, - declType *DeclType, - res Resolver) (*RuleTypes, error) { +func allTypesForDecl(declTypes []*DeclType) map[string]*DeclType { + if declTypes == nil { + return nil + } + allTypes := map[string]*DeclType{} + for _, declType := range declTypes { + for k, t := range FieldTypeMap(declType.TypeName(), declType) { + allTypes[k] = t + } + } + + return allTypes +} + +// NewDeclTypeProvider returns an Open API Schema-based type-system which is CEL compatible. +func NewDeclTypeProvider(rootTypes ...*DeclType) *DeclTypeProvider { // Note, if the schema indicates that it's actually based on another proto // then prefer the proto definition. For expressions in the proto, a new field // annotation will be needed to indicate the expected environment and type of // the expression. - schemaTypes, err := newSchemaTypeProvider(kind, declType) - if err != nil { - return nil, err + allTypes := allTypesForDecl(rootTypes) + return &DeclTypeProvider{ + registeredTypes: allTypes, } - if schemaTypes == nil { - return nil, nil - } - return &RuleTypes{ - ruleSchemaDeclTypes: schemaTypes, - resolver: res, - }, nil } -// RuleTypes extends the CEL ref.TypeProvider interface and provides an Open API Schema-based +// DeclTypeProvider extends the CEL ref.TypeProvider interface and provides an Open API Schema-based // type-system. -type RuleTypes struct { - ref.TypeProvider - ruleSchemaDeclTypes *schemaTypeProvider - typeAdapter ref.TypeAdapter - resolver Resolver +type DeclTypeProvider struct { + registeredTypes map[string]*DeclType + typeProvider ref.TypeProvider + typeAdapter ref.TypeAdapter +} + +func (rt *DeclTypeProvider) EnumValue(enumName string) ref.Val { + return rt.typeProvider.EnumValue(enumName) +} + +func (rt *DeclTypeProvider) FindIdent(identName string) (ref.Val, bool) { + return rt.typeProvider.FindIdent(identName) } // EnvOptions returns a set of cel.EnvOption values which includes the declaration set // as well as a custom ref.TypeProvider. // -// Note, the standard declaration set includes 'rule' which is defined as the top-level rule-schema -// type if one is configured. -// -// If the RuleTypes value is nil, an empty []cel.EnvOption set is returned. -func (rt *RuleTypes) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, error) { +// If the DeclTypeProvider value is nil, an empty []cel.EnvOption set is returned. 
+func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, error) { if rt == nil { return []cel.EnvOption{}, nil } @@ -367,13 +376,12 @@ func (rt *RuleTypes) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, error) { return []cel.EnvOption{ cel.CustomTypeProvider(rtWithTypes), cel.CustomTypeAdapter(rtWithTypes), - cel.Variable("rule", rt.ruleSchemaDeclTypes.root.CelType()), }, nil } -// WithTypeProvider returns a new RuleTypes that sets the given TypeProvider -// If the original RuleTypes is nil, the returned RuleTypes is still nil. -func (rt *RuleTypes) WithTypeProvider(tp ref.TypeProvider) (*RuleTypes, error) { +// WithTypeProvider returns a new DeclTypeProvider that sets the given TypeProvider +// If the original DeclTypeProvider is nil, the returned DeclTypeProvider is still nil. +func (rt *DeclTypeProvider) WithTypeProvider(tp ref.TypeProvider) (*DeclTypeProvider, error) { if rt == nil { return nil, nil } @@ -382,13 +390,12 @@ func (rt *RuleTypes) WithTypeProvider(tp ref.TypeProvider) (*RuleTypes, error) { if ok { ta = tpa } - rtWithTypes := &RuleTypes{ - TypeProvider: tp, - typeAdapter: ta, - ruleSchemaDeclTypes: rt.ruleSchemaDeclTypes, - resolver: rt.resolver, + rtWithTypes := &DeclTypeProvider{ + typeProvider: tp, + typeAdapter: ta, + registeredTypes: rt.registeredTypes, } - for name, declType := range rt.ruleSchemaDeclTypes.types { + for name, declType := range rt.registeredTypes { tpType, found := tp.FindType(name) expT, err := declType.ExprType() if err != nil { @@ -396,7 +403,7 @@ func (rt *RuleTypes) WithTypeProvider(tp ref.TypeProvider) (*RuleTypes, error) { } if found && !proto.Equal(tpType, expT) { return nil, fmt.Errorf( - "type %s definition differs between CEL environment and rule", name) + "type %s definition differs between CEL environment and type provider", name) } } return rtWithTypes, nil @@ -409,7 +416,7 @@ func (rt *RuleTypes) WithTypeProvider(tp ref.TypeProvider) (*RuleTypes, error) { // // Note, when the type name is based on the Open API Schema, the name will reflect the object path // where the type definition appears. -func (rt *RuleTypes) FindType(typeName string) (*exprpb.Type, bool) { +func (rt *DeclTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { if rt == nil { return nil, false } @@ -421,11 +428,11 @@ func (rt *RuleTypes) FindType(typeName string) (*exprpb.Type, bool) { } return expT, found } - return rt.TypeProvider.FindType(typeName) + return rt.typeProvider.FindType(typeName) } // FindDeclType returns the CPT type description which can be mapped to a CEL type. -func (rt *RuleTypes) FindDeclType(typeName string) (*DeclType, bool) { +func (rt *DeclTypeProvider) FindDeclType(typeName string) (*DeclType, bool) { if rt == nil { return nil, false } @@ -438,10 +445,10 @@ func (rt *RuleTypes) FindDeclType(typeName string) (*DeclType, bool) { // If, in the future an object instance rather than a type name were provided, the field // resolution might more accurately reflect the expected type model. However, in this case // concessions were made to align with the existing CEL interfaces. 
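Since `DeclTypeProvider` replaces `RuleTypes` and drops the `Resolver` indirection, a hedged sketch of the intended wiring may help; it mirrors the `filterAndBuildOpts` call in the earlier hunk, with `baseEnv` (a `*cel.Env`) and `objectType` assumed:

```go
// Register OpenAPI-derived DeclTypes with an existing CEL environment.
// NewDeclTypeProvider replaces the old NewRuleTypes constructor.
provider := apiservercel.NewDeclTypeProvider(objectType)
providerOpts, err := provider.EnvOptions(baseEnv.TypeProvider())
if err != nil {
	return nil, err
}
envWithTypes, err := baseEnv.Extend(providerOpts...)
```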
-func (rt *RuleTypes) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { +func (rt *DeclTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { st, found := rt.findDeclType(typeName) if !found { - return rt.TypeProvider.FindFieldType(typeName, fieldName) + return rt.typeProvider.FindFieldType(typeName, fieldName) } f, found := st.Fields[fieldName] @@ -471,48 +478,63 @@ func (rt *RuleTypes) FindFieldType(typeName, fieldName string) (*ref.FieldType, // NativeToValue is an implementation of the ref.TypeAdapater interface which supports conversion // of rule values to CEL ref.Val instances. -func (rt *RuleTypes) NativeToValue(val interface{}) ref.Val { +func (rt *DeclTypeProvider) NativeToValue(val interface{}) ref.Val { return rt.typeAdapter.NativeToValue(val) } -// TypeNames returns the list of type names declared within the RuleTypes object. -func (rt *RuleTypes) TypeNames() []string { - typeNames := make([]string, len(rt.ruleSchemaDeclTypes.types)) +func (rt *DeclTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val { + // TODO: implement for OpenAPI types to enable CEL object instantiation, which is needed + // for mutating admission. + return rt.typeProvider.NewValue(typeName, fields) +} + +// TypeNames returns the list of type names declared within the DeclTypeProvider object. +func (rt *DeclTypeProvider) TypeNames() []string { + typeNames := make([]string, len(rt.registeredTypes)) i := 0 - for name := range rt.ruleSchemaDeclTypes.types { + for name := range rt.registeredTypes { typeNames[i] = name i++ } return typeNames } -func (rt *RuleTypes) findDeclType(typeName string) (*DeclType, bool) { - declType, found := rt.ruleSchemaDeclTypes.types[typeName] +func (rt *DeclTypeProvider) findDeclType(typeName string) (*DeclType, bool) { + declType, found := rt.registeredTypes[typeName] if found { return declType, true } - declType, found = rt.resolver.FindType(typeName) - if found { - return declType, true + declType = findScalar(typeName) + return declType, declType != nil +} + +func findScalar(typename string) *DeclType { + switch typename { + case BoolType.TypeName(): + return BoolType + case BytesType.TypeName(): + return BytesType + case DoubleType.TypeName(): + return DoubleType + case DurationType.TypeName(): + return DurationType + case IntType.TypeName(): + return IntType + case NullType.TypeName(): + return NullType + case StringType.TypeName(): + return StringType + case TimestampType.TypeName(): + return TimestampType + case UintType.TypeName(): + return UintType + case ListType.TypeName(): + return ListType + case MapType.TypeName(): + return MapType + default: + return nil } - return nil, false -} - -func newSchemaTypeProvider(kind string, declType *DeclType) (*schemaTypeProvider, error) { - if declType == nil { - return nil, nil - } - root := declType.MaybeAssignTypeName(kind) - types := FieldTypeMap(kind, root) - return &schemaTypeProvider{ - root: root, - types: types, - }, nil -} - -type schemaTypeProvider struct { - root *DeclType - types map[string]*DeclType } var ( diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index d6741bf3a3aa..277bdcdfe5f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -34,17 +34,17 @@ import ( "k8s.io/klog/v2" ) -type 
recordMetrics func(context.Context, *authenticator.Response, bool, error, authenticator.Audiences, time.Time, time.Time)
+type authenticationRecordMetricsFunc func(context.Context, *authenticator.Response, bool, error, authenticator.Audiences, time.Time, time.Time)
 
 // WithAuthentication creates an http handler that tries to authenticate the given request as a user, and then
 // stores any such user found onto the provided context for the request. If authentication fails or returns an error
 // the failed handler is used. On success, "Authorization" header is removed from the request and handler
 // is invoked to serve the request.
 func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences, requestHeaderConfig *authenticatorfactory.RequestHeaderConfig) http.Handler {
-	return withAuthentication(handler, auth, failed, apiAuds, requestHeaderConfig, recordAuthMetrics)
+	return withAuthentication(handler, auth, failed, apiAuds, requestHeaderConfig, recordAuthenticationMetrics)
 }
 
-func withAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences, requestHeaderConfig *authenticatorfactory.RequestHeaderConfig, metrics recordMetrics) http.Handler {
+func withAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences, requestHeaderConfig *authenticatorfactory.RequestHeaderConfig, metrics authenticationRecordMetricsFunc) http.Handler {
 	if auth == nil {
 		klog.Warning("Authentication is disabled")
 		return handler
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go
index f7648d41cedd..e102a1e32818 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"net/http"
+	"time"
 
 	"k8s.io/klog/v2"
 
@@ -41,14 +42,21 @@ const (
 	reasonError = "internal error"
 )
 
-// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise.
-func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler {
+type recordAuthorizationMetricsFunc func(ctx context.Context, authorized authorizer.Decision, err error, authStart time.Time, authFinish time.Time)
+
+// WithAuthorization passes all authorized requests on to handler, and returns a forbidden error otherwise.
+func WithAuthorization(handler http.Handler, auth authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler {
+	return withAuthorization(handler, auth, s, recordAuthorizationMetrics)
+}
+
+func withAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer, metrics recordAuthorizationMetricsFunc) http.Handler {
 	if a == nil {
 		klog.Warning("Authorization is disabled")
 		return handler
 	}
 	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 		ctx := req.Context()
+		authorizationStart := time.Now()
 
 		attributes, err := GetAuthorizerAttributes(ctx)
 		if err != nil {
@@ -56,6 +64,12 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.
return } authorized, reason, err := a.Authorize(ctx, attributes) + + authorizationFinish := time.Now() + defer func() { + metrics(ctx, authorized, err, authorizationStart, authorizationFinish) + }() + // an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here. if authorized == authorizer.DecisionAllow { audit.AddAuditAnnotations(ctx, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go index 47e1be847c7a..a4dae3d84ed9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go @@ -21,6 +21,8 @@ import ( "strings" "time" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" @@ -38,6 +40,10 @@ const ( successLabel = "success" failureLabel = "failure" errorLabel = "error" + + allowedLabel = "allowed" + deniedLabel = "denied" + noOpinionLabel = "no-opinion" ) var ( @@ -68,15 +74,54 @@ var ( }, []string{"result"}, ) + + authorizationAttemptsCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "authorization_attempts_total", + Help: "Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'.", + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) + + authorizationLatency = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Name: "authorization_duration_seconds", + Help: "Authorization duration in seconds broken out by result.", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }, + []string{"result"}, + ) ) func init() { legacyregistry.MustRegister(authenticatedUserCounter) legacyregistry.MustRegister(authenticatedAttemptsCounter) legacyregistry.MustRegister(authenticationLatency) + legacyregistry.MustRegister(authorizationAttemptsCounter) + legacyregistry.MustRegister(authorizationLatency) +} + +func recordAuthorizationMetrics(ctx context.Context, authorized authorizer.Decision, err error, authStart time.Time, authFinish time.Time) { + var resultLabel string + + switch { + case authorized == authorizer.DecisionAllow: + resultLabel = allowedLabel + case err != nil: + resultLabel = errorLabel + case authorized == authorizer.DecisionDeny: + resultLabel = deniedLabel + case authorized == authorizer.DecisionNoOpinion: + resultLabel = noOpinionLabel + } + + authorizationAttemptsCounter.WithContext(ctx).WithLabelValues(resultLabel).Inc() + authorizationLatency.WithContext(ctx).WithLabelValues(resultLabel).Observe(authFinish.Sub(authStart).Seconds()) } -func recordAuthMetrics(ctx context.Context, resp *authenticator.Response, ok bool, err error, apiAudiences authenticator.Audiences, authStart time.Time, authFinish time.Time) { +func recordAuthenticationMetrics(ctx context.Context, resp *authenticator.Response, ok bool, err error, apiAudiences authenticator.Audiences, authStart time.Time, authFinish time.Time) { var resultLabel string switch { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 78c1d2f52a73..120d3f665bd0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ 
b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -191,14 +191,13 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int // Dedup owner references before updating managed fields dedupOwnerReferencesAndAddWarning(obj, req.Context(), false) result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) { - if scope.FieldManager != nil { - liveObj, err := scope.Creater.New(scope.Kind) - if err != nil { - return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err) - } - obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) - admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit) + liveObj, err := scope.Creater.New(scope.Kind) + if err != nil { + return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err) } + obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) + admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit) + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { if err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 4f5533f34afe..8209efef708e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -177,9 +177,8 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac userInfo, ) - if scope.FieldManager != nil { - admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit) - } + admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit) + mutatingAdmission, _ := admit.(admission.MutationInterface) createAuthorizerAttributes := authorizer.AttributesRecord{ User: userInfo, @@ -345,9 +344,12 @@ func (p *jsonPatcher) applyPatchToCurrentObject(requestContext context.Context, } } - if p.fieldManager != nil { - objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)) + if p.options == nil { + // Provide a more informative error for the crash that would + // happen on the next line + panic("PatchOptions required but not provided") } + objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)) return objToUpdate, nil } @@ -441,9 +443,7 @@ func (p *smpPatcher) applyPatchToCurrentObject(requestContext context.Context, c return nil, err } - if p.fieldManager != nil { - newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent)) - } + newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent)) return newObj, nil } @@ -654,9 +654,6 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti } transformers := []rest.TransformFunc{p.applyPatch, p.applyAdmission, dedupOwnerReferencesTransformer} - if scope.FieldManager != nil { - transformers = append(transformers, fieldmanager.IgnoreManagedFieldsTimestampsTransformer) - } wasCreated := false p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil, 
transformers...) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go index 630c97cdcdd6..4b76ef97e07d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -156,15 +156,13 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa // allows skipping managedFields update if the resulting object is too big shouldUpdateManagedFields := true - if scope.FieldManager != nil { - admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit) - transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) { - if shouldUpdateManagedFields { - return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil - } - return newObj, nil - }) - } + admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit) + transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) { + if shouldUpdateManagedFields { + return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil + } + return newObj, nil + }) if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) { @@ -189,15 +187,6 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa }) } - // Ignore changes that only affect managed fields - // timestamps. FieldManager can't know about changes - // like normalized fields, defaulted fields and other - // mutations. - // Only makes sense when SSA field manager is being used - if scope.FieldManager != nil { - transformers = append(transformers, fieldmanager.IgnoreManagedFieldsTimestampsTransformer) - } - createAuthorizerAttributes := authorizer.AttributesRecord{ User: userInfo, ResourceRequest: true, @@ -237,7 +226,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa result, err := requestFunc() // If the object wasn't committed to storage because it's serialized size was too large, // it is safe to remove managedFields (which can be large) and try again. 
- if isTooLargeError(err) && scope.FieldManager != nil { + if isTooLargeError(err) { if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil { accessor.SetManagedFields(nil) shouldUpdateManagedFields = false diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index c76cc194a2ce..7d9372d967a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -219,7 +219,7 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { var unknown runtime.Unknown internalEvent := &metav1.InternalEvent{} outEvent := &metav1.WatchEvent{} - buf := &bytes.Buffer{} + buf := runtime.NewSpliceBuffer() ch := s.Watching.ResultChan() done := req.Context().Done() diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 3f8b6807e759..f1adddba54a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -674,28 +674,23 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } - // Use TypeConverter's nil-ness as a proxy for whether SSA/OpenAPI is enabled - // This should be removed in the future and made unconditional - // https://github.com/kubernetes/kubernetes/pull/114998 - if a.group.TypeConverter != nil { - var resetFields map[fieldpath.APIVersion]*fieldpath.Set - if resetFieldsStrategy, isResetFieldsStrategy := storage.(rest.ResetFieldsStrategy); isResetFieldsStrategy { - resetFields = resetFieldsStrategy.GetResetFields() - } + var resetFields map[fieldpath.APIVersion]*fieldpath.Set + if resetFieldsStrategy, isResetFieldsStrategy := storage.(rest.ResetFieldsStrategy); isResetFieldsStrategy { + resetFields = resetFieldsStrategy.GetResetFields() + } - reqScope.FieldManager, err = managedfields.NewDefaultFieldManager( - a.group.TypeConverter, - a.group.UnsafeConvertor, - a.group.Defaulter, - a.group.Creater, - fqKindToRegister, - reqScope.HubGroupVersion, - subresource, - resetFields, - ) - if err != nil { - return nil, nil, fmt.Errorf("failed to create field manager: %v", err) - } + reqScope.FieldManager, err = managedfields.NewDefaultFieldManager( + a.group.TypeConverter, + a.group.UnsafeConvertor, + a.group.Defaulter, + a.group.Creater, + fqKindToRegister, + reqScope.HubGroupVersion, + subresource, + resetFields, + ) + if err != nil { + return nil, nil, fmt.Errorf("failed to create field manager: %v", err) } for _, action := range actions { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 72cd493758bb..a2d342d62621 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -112,17 +112,6 @@ const ( // Enables expression validation for Custom Resource CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" - // owner: @apelisse - // alpha: v1.12 - // beta: v1.13 - // stable: v1.18 - // - // Allow requests to be processed but not stored, so that - // validation, merging, mutation can be tested without - // 
committing. - DryRun featuregate.Feature = "DryRun" - - // owner: @wojtek-t // alpha: v1.20 // beta: v1.21 // GA: v1.24 @@ -253,8 +242,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta}, - DryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, KMSv2: {Default: true, PreRelease: featuregate.Beta}, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS index 29d730907f9c..c0e4923f6ac7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go index 018e4b4c52df..c8db56b2bab7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go @@ -18,7 +18,10 @@ package registry import ( "context" + "fmt" + "reflect" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/storage" @@ -72,19 +75,30 @@ func (s *DryRunnableStorage) GuaranteedUpdate( ctx context.Context, key string, destination runtime.Object, ignoreNotFound bool, preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error { if dryRun { - err := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, destination) + var current runtime.Object + v, err := conversion.EnforcePtr(destination) + if err != nil { + return fmt.Errorf("unable to convert output object to pointer: %v", err) + } + if u, ok := v.Addr().Interface().(runtime.Unstructured); ok { + current = u.NewEmptyInstance() + } else { + current = reflect.New(v.Type()).Interface().(runtime.Object) + } + + err = s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, current) if err != nil { return err } - err = preconditions.Check(key, destination) + err = preconditions.Check(key, current) if err != nil { return err } - rev, err := s.Versioner().ObjectResourceVersion(destination) + rev, err := s.Versioner().ObjectResourceVersion(current) if err != nil { return err } - updated, _, err := tryUpdate(destination, storage.ResponseMeta{ResourceVersion: rev}) + updated, _, err := tryUpdate(current, storage.ResponseMeta{ResourceVersion: rev}) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index fa23d29d6c90..55f06f7972b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" genericapirequest 
"k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" @@ -671,6 +672,15 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { return nil, nil, err } + + // Ignore changes that only affect managed fields timestamps. + // FieldManager can't know about changes like normalized fields, defaulted + // fields and other mutations. + obj, err = fieldmanager.IgnoreManagedFieldsTimestampsTransformer(ctx, obj, existing) + if err != nil { + return nil, nil, err + } + // at this point we have a fully formed object. It is time to call the validators that the apiserver // handling chain wants to enforce. if updateValidation != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go index 9dc87506a40b..bb11f22ec671 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/config.go @@ -65,6 +65,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/routes" serverstore "k8s.io/apiserver/pkg/server/storage" + storagevalue "k8s.io/apiserver/pkg/storage/value" "k8s.io/apiserver/pkg/storageversion" utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" @@ -190,6 +191,8 @@ type Config struct { // SkipOpenAPIInstallation avoids installing the OpenAPI handler if set to true. SkipOpenAPIInstallation bool + // ResourceTransformers are used to transform resources from and to etcd, e.g. encryption. + ResourceTransformers storagevalue.ResourceTransformers // RESTOptionsGetter is used to construct RESTStorage types via the generic registry. RESTOptionsGetter genericregistry.RESTOptionsGetter diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 5d7b00ec337d..9effcb768f29 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -34,7 +34,6 @@ import ( const ( // Constant for the retry-after interval on rate limiting. - // TODO: maybe make this dynamic? or user-adjustable? retryAfter = "1" // How often inflight usage metric should be updated. Because @@ -210,7 +209,7 @@ func WithMaxInFlightLimit( // We need to split this data between buckets used for throttling. 
metrics.RecordDroppedRequest(r, requestInfo, metrics.APIServerComponent, isMutatingRequest) metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests) - tooManyRequests(r, w) + tooManyRequests(r, w, retryAfter) } } }) @@ -221,9 +220,3 @@ func WithMaxInFlightLimit( func StartMaxInFlightWatermarkMaintenance(stopCh <-chan struct{}) { startWatermarkMaintenance(watermark, stopCh) } - -func tooManyRequests(req *http.Request, w http.ResponseWriter) { - // Return a 429 status indicating "Too Many Requests" - w.Header().Set("Retry-After", retryAfter) - http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 937971c17eb7..6b3987781601 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" "runtime" + "strconv" "sync" "sync/atomic" "time" @@ -67,242 +68,268 @@ func truncateLogField(s string) string { var initAPFOnce sync.Once -// WithPriorityAndFairness limits the number of in-flight -// requests in a fine-grained way. -func WithPriorityAndFairness( - handler http.Handler, - longRunningRequestCheck apirequest.LongRunningRequestCheck, - fcIfc utilflowcontrol.Interface, - workEstimator flowcontrolrequest.WorkEstimatorFunc, -) http.Handler { - if fcIfc == nil { - klog.Warningf("priority and fairness support not found, skipping") - return handler +type priorityAndFairnessHandler struct { + handler http.Handler + longRunningRequestCheck apirequest.LongRunningRequestCheck + fcIfc utilflowcontrol.Interface + workEstimator flowcontrolrequest.WorkEstimatorFunc + + // droppedRequests tracks the history of dropped requests for + // the purpose of computing RetryAfter header to avoid system + // overload. + droppedRequests utilflowcontrol.DroppedRequestsTracker +} + +func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no RequestInfo found in context")) + return + } + user, ok := apirequest.UserFrom(ctx) + if !ok { + handleError(w, r, fmt.Errorf("no User found in context")) + return } - initAPFOnce.Do(func() { - initMaxInFlight(0, 0) - // Fetching these gauges is delayed until after their underlying metric has been registered - // so that this latches onto the efficient implementation. - waitingMark.readOnlyObserver = fcmetrics.GetWaitingReadonlyConcurrency() - waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency() - }) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - requestInfo, ok := apirequest.RequestInfoFrom(ctx) - if !ok { - handleError(w, r, fmt.Errorf("no RequestInfo found in context")) - return - } - user, ok := apirequest.UserFrom(ctx) - if !ok { - handleError(w, r, fmt.Errorf("no User found in context")) - return - } - isWatchRequest := watchVerbs.Has(requestInfo.Verb) + isWatchRequest := watchVerbs.Has(requestInfo.Verb) - // Skip tracking long running non-watch requests. 
- if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) && !isWatchRequest { - klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user) - handler.ServeHTTP(w, r) - return - } + // Skip tracking long running non-watch requests. + if h.longRunningRequestCheck != nil && h.longRunningRequestCheck(r, requestInfo) && !isWatchRequest { + klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user) + h.handler.ServeHTTP(w, r) + return + } - var classification *PriorityAndFairnessClassification - noteFn := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) { - classification = &PriorityAndFairnessClassification{ - FlowSchemaName: fs.Name, - FlowSchemaUID: fs.UID, - PriorityLevelName: pl.Name, - PriorityLevelUID: pl.UID} + var classification *PriorityAndFairnessClassification + noteFn := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) { + classification = &PriorityAndFairnessClassification{ + FlowSchemaName: fs.Name, + FlowSchemaUID: fs.UID, + PriorityLevelName: pl.Name, + PriorityLevelUID: pl.UID, + } - httplog.AddKeyValue(ctx, "apf_pl", truncateLogField(pl.Name)) - httplog.AddKeyValue(ctx, "apf_fs", truncateLogField(fs.Name)) + httplog.AddKeyValue(ctx, "apf_pl", truncateLogField(pl.Name)) + httplog.AddKeyValue(ctx, "apf_fs", truncateLogField(fs.Name)) + } + // estimateWork is called, if at all, after noteFn + estimateWork := func() flowcontrolrequest.WorkEstimate { + if classification == nil { + // workEstimator is being invoked before classification of + // the request has completed, we should never be here though. + klog.ErrorS(fmt.Errorf("workEstimator is being invoked before classification of the request has completed"), + "Using empty FlowSchema and PriorityLevelConfiguration name", "verb", r.Method, "URI", r.RequestURI) + return h.workEstimator(r, "", "") } - // estimateWork is called, if at all, after noteFn - estimateWork := func() flowcontrolrequest.WorkEstimate { - if classification == nil { - // workEstimator is being invoked before classification of - // the request has completed, we should never be here though. 
- klog.ErrorS(fmt.Errorf("workEstimator is being invoked before classification of the request has completed"), - "Using empty FlowSchema and PriorityLevelConfiguration name", "verb", r.Method, "URI", r.RequestURI) - - return workEstimator(r, "", "") - } - workEstimate := workEstimator(r, classification.FlowSchemaName, classification.PriorityLevelName) + workEstimate := h.workEstimator(r, classification.FlowSchemaName, classification.PriorityLevelName) - fcmetrics.ObserveWorkEstimatedSeats(classification.PriorityLevelName, classification.FlowSchemaName, workEstimate.MaxSeats()) - httplog.AddKeyValue(ctx, "apf_iseats", workEstimate.InitialSeats) - httplog.AddKeyValue(ctx, "apf_fseats", workEstimate.FinalSeats) - httplog.AddKeyValue(ctx, "apf_additionalLatency", workEstimate.AdditionalLatency) + fcmetrics.ObserveWorkEstimatedSeats(classification.PriorityLevelName, classification.FlowSchemaName, workEstimate.MaxSeats()) + httplog.AddKeyValue(ctx, "apf_iseats", workEstimate.InitialSeats) + httplog.AddKeyValue(ctx, "apf_fseats", workEstimate.FinalSeats) + httplog.AddKeyValue(ctx, "apf_additionalLatency", workEstimate.AdditionalLatency) - return workEstimate - } + return workEstimate + } - var served bool - isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb) - noteExecutingDelta := func(delta int32) { - if isMutatingRequest { - watermark.recordMutating(int(atomic.AddInt32(&atomicMutatingExecuting, delta))) - } else { - watermark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyExecuting, delta))) - } - } - noteWaitingDelta := func(delta int32) { - if isMutatingRequest { - waitingMark.recordMutating(int(atomic.AddInt32(&atomicMutatingWaiting, delta))) - } else { - waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta))) - } + var served bool + isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb) + noteExecutingDelta := func(delta int32) { + if isMutatingRequest { + watermark.recordMutating(int(atomic.AddInt32(&atomicMutatingExecuting, delta))) + } else { + watermark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyExecuting, delta))) } - queueNote := func(inQueue bool) { - if inQueue { - noteWaitingDelta(1) - } else { - noteWaitingDelta(-1) - } + } + noteWaitingDelta := func(delta int32) { + if isMutatingRequest { + waitingMark.recordMutating(int(atomic.AddInt32(&atomicMutatingWaiting, delta))) + } else { + waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta))) } - - digest := utilflowcontrol.RequestDigest{ - RequestInfo: requestInfo, - User: user, + } + queueNote := func(inQueue bool) { + if inQueue { + noteWaitingDelta(1) + } else { + noteWaitingDelta(-1) } + } - if isWatchRequest { - // This channel blocks calling handler.ServeHTTP() until closed, and is closed inside execute(). - // If APF rejects the request, it is never closed. - shouldStartWatchCh := make(chan struct{}) + digest := utilflowcontrol.RequestDigest{ + RequestInfo: requestInfo, + User: user, + } - watchInitializationSignal := newInitializationSignal() - // This wraps the request passed to handler.ServeHTTP(), - // setting a context that plumbs watchInitializationSignal to storage - var watchReq *http.Request - // This is set inside execute(), prior to closing shouldStartWatchCh. - // If the request is rejected by APF it is left nil. - var forgetWatch utilflowcontrol.ForgetWatchFunc + if isWatchRequest { + // This channel blocks calling handler.ServeHTTP() until closed, and is closed inside execute(). 
+ // If APF rejects the request, it is never closed. + shouldStartWatchCh := make(chan struct{}) + + watchInitializationSignal := newInitializationSignal() + // This wraps the request passed to handler.ServeHTTP(), + // setting a context that plumbs watchInitializationSignal to storage + var watchReq *http.Request + // This is set inside execute(), prior to closing shouldStartWatchCh. + // If the request is rejected by APF it is left nil. + var forgetWatch utilflowcontrol.ForgetWatchFunc + + defer func() { + // Protect from the situation when request will not reach storage layer + // and the initialization signal will not be send. + if watchInitializationSignal != nil { + watchInitializationSignal.Signal() + } + // Forget the watcher if it was registered. + // + // This is race-free because by this point, one of the following occurred: + // case <-shouldStartWatchCh: execute() completed the assignment to forgetWatch + // case <-resultCh: Handle() completed, and Handle() does not return + // while execute() is running + if forgetWatch != nil { + forgetWatch() + } + }() + execute := func() { + startedAt := time.Now() defer func() { - // Protect from the situation when request will not reach storage layer - // and the initialization signal will not be send. - if watchInitializationSignal != nil { - watchInitializationSignal.Signal() - } - // Forget the watcher if it was registered. - // - // // This is race-free because by this point, one of the following occurred: - // case <-shouldStartWatchCh: execute() completed the assignment to forgetWatch - // case <-resultCh: Handle() completed, and Handle() does not return - // while execute() is running - if forgetWatch != nil { - forgetWatch() - } + httplog.AddKeyValue(ctx, "apf_init_latency", time.Since(startedAt)) }() + noteExecutingDelta(1) + defer noteExecutingDelta(-1) + served = true + setResponseHeaders(classification, w) - execute := func() { - startedAt := time.Now() - defer func() { - httplog.AddKeyValue(ctx, "apf_init_latency", time.Since(startedAt)) - }() - noteExecutingDelta(1) - defer noteExecutingDelta(-1) - served = true - setResponseHeaders(classification, w) + forgetWatch = h.fcIfc.RegisterWatch(r) - forgetWatch = fcIfc.RegisterWatch(r) + // Notify the main thread that we're ready to start the watch. + close(shouldStartWatchCh) - // Notify the main thread that we're ready to start the watch. - close(shouldStartWatchCh) + // Wait until the request is finished from the APF point of view + // (which is when its initialization is done). + watchInitializationSignal.Wait() + } - // Wait until the request is finished from the APF point of view - // (which is when its initialization is done). - watchInitializationSignal.Wait() - } + // Ensure that an item can be put to resultCh asynchronously. + resultCh := make(chan interface{}, 1) + + // Call Handle in a separate goroutine. + // The reason for it is that from APF point of view, the request processing + // finishes as soon as watch is initialized (which is generally orders of + // magnitude faster then the watch request itself). This means that Handle() + // call finishes much faster and for performance reasons we want to reduce + // the number of running goroutines - so we run the shorter thing in a + // dedicated goroutine and the actual watch handler in the main one. + go func() { + defer func() { + err := recover() + // do not wrap the sentinel ErrAbortHandler panic value + if err != nil && err != http.ErrAbortHandler { + // Same as stdlib http server code. 
Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err = fmt.Sprintf("%v\n%s", err, buf) + } - // Ensure that an item can be put to resultCh asynchronously. - resultCh := make(chan interface{}, 1) - - // Call Handle in a separate goroutine. - // The reason for it is that from APF point of view, the request processing - // finishes as soon as watch is initialized (which is generally orders of - // magnitude faster then the watch request itself). This means that Handle() - // call finishes much faster and for performance reasons we want to reduce - // the number of running goroutines - so we run the shorter thing in a - // dedicated goroutine and the actual watch handler in the main one. - go func() { - defer func() { - err := recover() - // do not wrap the sentinel ErrAbortHandler panic value - if err != nil && err != http.ErrAbortHandler { - // Same as stdlib http server code. Manually allocate stack - // trace buffer size to prevent excessively large logs - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - err = fmt.Sprintf("%v\n%s", err, buf) - } - - // Ensure that the result is put into resultCh independently of the panic. - resultCh <- err - }() - - // We create handleCtx with explicit cancelation function. - // The reason for it is that Handle() underneath may start additional goroutine - // that is blocked on context cancellation. However, from APF point of view, - // we don't want to wait until the whole watch request is processed (which is - // when it context is actually cancelled) - we want to unblock the goroutine as - // soon as the request is processed from the APF point of view. - // - // Note that we explicitly do NOT call the actuall handler using that context - // to avoid cancelling request too early. - handleCtx, handleCtxCancel := context.WithCancel(ctx) - defer handleCtxCancel() - - // Note that Handle will return irrespective of whether the request - // executes or is rejected. In the latter case, the function will return - // without calling the passed `execute` function. - fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute) + // Ensure that the result is put into resultCh independently of the panic. + resultCh <- err }() - select { - case <-shouldStartWatchCh: - watchCtx := utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal) - watchReq = r.WithContext(watchCtx) - handler.ServeHTTP(w, watchReq) - // Protect from the situation when request will not reach storage layer - // and the initialization signal will not be send. - // It has to happen before waiting on the resultCh below. - watchInitializationSignal.Signal() - // TODO: Consider finishing the request as soon as Handle call panics. - if err := <-resultCh; err != nil { - panic(err) - } - case err := <-resultCh: - if err != nil { - panic(err) - } + // We create handleCtx with explicit cancelation function. + // The reason for it is that Handle() underneath may start additional goroutine + // that is blocked on context cancellation. However, from APF point of view, + // we don't want to wait until the whole watch request is processed (which is + // when it context is actually cancelled) - we want to unblock the goroutine as + // soon as the request is processed from the APF point of view. 
+ // + // Note that we explicitly do NOT call the actuall handler using that context + // to avoid cancelling request too early. + handleCtx, handleCtxCancel := context.WithCancel(ctx) + defer handleCtxCancel() + + // Note that Handle will return irrespective of whether the request + // executes or is rejected. In the latter case, the function will return + // without calling the passed `execute` function. + h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute) + }() + + select { + case <-shouldStartWatchCh: + watchCtx := utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal) + watchReq = r.WithContext(watchCtx) + h.handler.ServeHTTP(w, watchReq) + // Protect from the situation when request will not reach storage layer + // and the initialization signal will not be send. + // It has to happen before waiting on the resultCh below. + watchInitializationSignal.Signal() + // TODO: Consider finishing the request as soon as Handle call panics. + if err := <-resultCh; err != nil { + panic(err) } - } else { - execute := func() { - noteExecutingDelta(1) - defer noteExecutingDelta(-1) - served = true - setResponseHeaders(classification, w) - - handler.ServeHTTP(w, r) + case err := <-resultCh: + if err != nil { + panic(err) } - - fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute) } - - if !served { + } else { + execute := func() { + noteExecutingDelta(1) + defer noteExecutingDelta(-1) + served = true setResponseHeaders(classification, w) - epmetrics.RecordDroppedRequest(r, requestInfo, epmetrics.APIServerComponent, isMutatingRequest) - epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests) - tooManyRequests(r, w) + h.handler.ServeHTTP(w, r) } + + h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute) + } + + if !served { + setResponseHeaders(classification, w) + + epmetrics.RecordDroppedRequest(r, requestInfo, epmetrics.APIServerComponent, isMutatingRequest) + epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests) + h.droppedRequests.RecordDroppedRequest(classification.PriorityLevelName) + + // TODO(wojtek-t): Idea from deads2k: we can consider some jittering and in case of non-int + // number, just return the truncated result and sleep the remainder server-side. + tooManyRequests(r, w, strconv.Itoa(int(h.droppedRequests.GetRetryAfter(classification.PriorityLevelName)))) + } +} + +// WithPriorityAndFairness limits the number of in-flight +// requests in a fine-grained way. +func WithPriorityAndFairness( + handler http.Handler, + longRunningRequestCheck apirequest.LongRunningRequestCheck, + fcIfc utilflowcontrol.Interface, + workEstimator flowcontrolrequest.WorkEstimatorFunc, +) http.Handler { + if fcIfc == nil { + klog.Warningf("priority and fairness support not found, skipping") + return handler + } + initAPFOnce.Do(func() { + initMaxInFlight(0, 0) + // Fetching these gauges is delayed until after their underlying metric has been registered + // so that this latches onto the efficient implementation. 
+ waitingMark.readOnlyObserver = fcmetrics.GetWaitingReadonlyConcurrency() + waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency() }) + + priorityAndFairnessHandler := &priorityAndFairnessHandler{ + handler: handler, + longRunningRequestCheck: longRunningRequestCheck, + fcIfc: fcIfc, + workEstimator: workEstimator, + droppedRequests: utilflowcontrol.NewDroppedRequestsTracker(), + } + return http.HandlerFunc(priorityAndFairnessHandler.Handle) } // StartPriorityAndFairnessWatermarkMaintenance starts the goroutines to observe and maintain watermarks for @@ -323,3 +350,9 @@ func setResponseHeaders(classification *PriorityAndFairnessClassification, w htt w.Header().Set(flowcontrol.ResponseHeaderMatchedPriorityLevelConfigurationUID, string(classification.PriorityLevelUID)) w.Header().Set(flowcontrol.ResponseHeaderMatchedFlowSchemaUID, string(classification.FlowSchemaUID)) } + +func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string) { + // Return a 429 status indicating "Too Many Requests" + w.Header().Set("Retry-After", retryAfter) + http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 52c865f8a98e..794a5db2f77b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -18,6 +18,7 @@ package server import ( "context" + "errors" "fmt" "net/http" gpath "path" @@ -736,16 +737,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdow } // installAPIResources is a private method for installing the REST storage backing each api groupversionresource -func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, openAPIModels map[string]*spec.Schema) error { - var typeConverter managedfields.TypeConverter - - if len(openAPIModels) > 0 { - var err error - typeConverter, err = managedfields.NewTypeConverter(openAPIModels, false) - if err != nil { - return err - } - } +func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, typeConverter managedfields.TypeConverter) error { var resourceInfos []*storageversion.ResourceInfo for _, groupVersion := range apiGroupInfo.PrioritizedVersions { if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { @@ -844,6 +836,9 @@ func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo // underlying storage will be destroyed on this servers shutdown. func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) error { for _, apiGroupInfo := range apiGroupInfos { + if len(apiGroupInfo.PrioritizedVersions) == 0 { + return fmt.Errorf("no version priority set for %#v", *apiGroupInfo) + } // Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned. 
// Catching these here places the error much closer to its origin if len(apiGroupInfo.PrioritizedVersions[0].Group) == 0 { @@ -953,13 +948,13 @@ func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec } // getOpenAPIModels is a private method for getting the OpenAPI models -func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (map[string]*spec.Schema, error) { +func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (managedfields.TypeConverter, error) { if s.openAPIV3Config == nil { - //!TODO: A future work should add a requirement that - // OpenAPIV3 config is required. May require some refactoring of tests. - return nil, nil + // SSA is GA and requires OpenAPI config to be set + // to create models. + return nil, errors.New("OpenAPIV3 config must not be nil") } - pathsToIgnore := openapiutil.NewTrie(s.openAPIConfig.IgnorePrefixes) + pathsToIgnore := openapiutil.NewTrie(s.openAPIV3Config.IgnorePrefixes) resourceNames := make([]string, 0) for _, apiGroupInfo := range apiGroupInfos { groupResources, err := getResourceNamesForGroup(apiPrefix, apiGroupInfo, pathsToIgnore) @@ -977,7 +972,13 @@ func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...* for _, apiGroupInfo := range apiGroupInfos { apiGroupInfo.StaticOpenAPISpec = openAPISpec } - return openAPISpec, nil + + typeConverter, err := managedfields.NewTypeConverter(openAPISpec, false) + if err != nil { + return nil, err + } + + return typeConverter, nil } // getResourceNamesForGroup is a private method for getting the canonical names for each resource to build in an api group diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/handler.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/handler.go index 9f37df1cdff3..847a624e36b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/handler.go @@ -53,13 +53,13 @@ type APIServerHandler struct { // Director is here so that we can properly handle fall through and proxy cases. // This looks a bit bonkers, but here's what's happening. We need to have /apis handling registered in gorestful in order to have // swagger generated for compatibility. Doing that with `/apis` as a webservice, means that it forcibly 404s (no defaulting allowed) - // all requests which are not /apis or /apis/. We need those calls to fall through behind goresful for proper delegation. Trying to + // all requests which are not /apis or /apis/. We need those calls to fall through behind gorestful for proper delegation. Trying to // register for a pattern which includes everything behind it doesn't work because gorestful negotiates for verbs and content encoding // and all those things go crazy when gorestful really just needs to pass through. In addition, openapi enforces unique verb constraints // which we don't fit into and it still muddies up swagger. Trying to switch the webservices into a route doesn't work because the // containing webservice faces all the same problems listed above. // This leads to the crazy thing done here. Our mux does what we need, so we'll place it in front of gorestful. It will introspect to - // decide if the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to NonGoRestfulMux mux in + // decide if the route is likely to be handled by gorestful and route there if needed. 
Otherwise, it goes to NonGoRestfulMux mux in // order to handle "normal" paths and delegation. Hopefully no API consumers will ever have to deal with this level of detail. I think // we should consider completely removing gorestful. // Other servers should only use this opaquely to delegate to an API server. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/admission.go index 5ee0036de1fc..6f4990a7e2c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/admission.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/admission.go @@ -39,7 +39,6 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/component-base/featuregate" ) @@ -123,7 +122,8 @@ func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { func (a *AdmissionOptions) ApplyTo( c *server.Config, informers informers.SharedInformerFactory, - kubeAPIServerClientConfig *rest.Config, + kubeClient kubernetes.Interface, + dynamicClient dynamic.Interface, features featuregate.FeatureGate, pluginInitializers ...admission.PluginInitializer, ) error { @@ -143,15 +143,8 @@ func (a *AdmissionOptions) ApplyTo( return fmt.Errorf("failed to read plugin config: %v", err) } - clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig) - if err != nil { - return err - } - dynamicClient, err := dynamic.NewForConfig(kubeAPIServerClientConfig) - if err != nil { - return err - } - genericInitializer := initializer.New(clientset, dynamicClient, informers, c.Authorization.Authorizer, features, c.DrainedNotify()) + genericInitializer := initializer.New(kubeClient, dynamicClient, informers, c.Authorization.Authorizer, features, + c.DrainedNotify()) initializersChain := admission.PluginInitializers{genericInitializer} initializersChain = append(initializersChain, pluginInitializers...) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 796cc6b03dc1..8fb0ae768252 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -43,7 +43,7 @@ import ( "k8s.io/apiserver/pkg/apis/config/validation" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server/healthz" - "k8s.io/apiserver/pkg/storage/value" + storagevalue "k8s.io/apiserver/pkg/storage/value" aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" "k8s.io/apiserver/pkg/storage/value/encrypt/envelope" envelopekmsv2 "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2" @@ -159,7 +159,7 @@ func (h *kmsv2PluginProbe) toHealthzCheck(idx int) healthz.HealthChecker { // EncryptionConfiguration represents the parsed and normalized encryption configuration for the apiserver. type EncryptionConfiguration struct { // Transformers is a list of value.Transformer that will be used to encrypt and decrypt data. - Transformers map[schema.GroupResource]value.Transformer + Transformers map[schema.GroupResource]storagevalue.Transformer // HealthChecks is a list of healthz.HealthChecker that will be used to check the health of the encryption providers. 
HealthChecks []healthz.HealthChecker @@ -207,7 +207,7 @@ func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool) (*E // getTransformerOverridesAndKMSPluginHealthzCheckers creates the set of transformers and KMS healthz checks based on the given config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, *kmsState, error) { +func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthz.HealthChecker, *kmsState, error) { var kmsHealthChecks []healthz.HealthChecker transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config) if err != nil { @@ -228,8 +228,8 @@ type healthChecker interface { // getTransformerOverridesAndKMSPluginProbes creates the set of transformers and KMS probes based on the given config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]value.Transformer, []healthChecker, *kmsState, error) { - resourceToPrefixTransformer := map[schema.GroupResource][]value.PrefixTransformer{} +func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthChecker, *kmsState, error) { + resourceToPrefixTransformer := map[schema.GroupResource][]storagevalue.PrefixTransformer{} var probes []healthChecker var kmsUsed kmsState @@ -268,11 +268,11 @@ func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apis probes = append(probes, p...) } - transformers := make(map[schema.GroupResource]value.Transformer, len(resourceToPrefixTransformer)) + transformers := make(map[schema.GroupResource]storagevalue.Transformer, len(resourceToPrefixTransformer)) for gr, transList := range resourceToPrefixTransformer { gr := gr transList := transList - transformers[gr] = value.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...) + transformers[gr] = storagevalue.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...) } return transformers, probes, &kmsUsed, nil @@ -478,15 +478,15 @@ func loadConfig(filepath string, reload bool) (*apiserverconfig.EncryptionConfig // prefixTransformersAndProbes creates the set of transformers and KMS probes based on the given resource config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
-func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]value.PrefixTransformer, []healthChecker, *kmsState, error) { - var transformers []value.PrefixTransformer +func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]storagevalue.PrefixTransformer, []healthChecker, *kmsState, error) { + var transformers []storagevalue.PrefixTransformer var probes []healthChecker var kmsUsed kmsState for _, provider := range config.Providers { provider := provider var ( - transformer value.PrefixTransformer + transformer storagevalue.PrefixTransformer transformerErr error probe healthChecker used *kmsState @@ -497,7 +497,7 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res transformer, transformerErr = aesPrefixTransformer(provider.AESGCM, aestransformer.NewGCMTransformer, aesGCMTransformerPrefixV1) case provider.AESCBC != nil: - cbcTransformer := func(block cipher.Block) (value.Transformer, error) { + cbcTransformer := func(block cipher.Block) (storagevalue.Transformer, error) { return aestransformer.NewCBCTransformer(block), nil } transformer, transformerErr = aesPrefixTransformer(provider.AESCBC, cbcTransformer, aesCBCTransformerPrefixV1) @@ -513,7 +513,7 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res } case provider.Identity != nil: - transformer = value.PrefixTransformer{ + transformer = storagevalue.PrefixTransformer{ Transformer: identity.NewEncryptCheckTransformer(), Prefix: []byte{}, } @@ -532,10 +532,10 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res return transformers, probes, &kmsUsed, nil } -type blockTransformerFunc func(cipher.Block) (value.Transformer, error) +type blockTransformerFunc func(cipher.Block) (storagevalue.Transformer, error) -func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (value.PrefixTransformer, error) { - var result value.PrefixTransformer +func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (storagevalue.PrefixTransformer, error) { + var result storagevalue.PrefixTransformer if len(config.Keys) == 0 { return result, fmt.Errorf("aes provider has no valid keys") @@ -550,7 +550,7 @@ func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTran } } - keyTransformers := []value.PrefixTransformer{} + keyTransformers := []storagevalue.PrefixTransformer{} for _, keyData := range config.Keys { keyData := keyData @@ -569,26 +569,26 @@ func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTran // Create a new PrefixTransformer for this key keyTransformers = append(keyTransformers, - value.PrefixTransformer{ + storagevalue.PrefixTransformer{ Transformer: transformer, Prefix: []byte(keyData.Name + ":"), }) } // Create a prefixTransformer which can choose between these keys - keyTransformer := value.NewPrefixTransformers( + keyTransformer := storagevalue.NewPrefixTransformers( fmt.Errorf("no matching key was found for the provided AES transformer"), keyTransformers...) 
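The `keyTransformers` union built just above is what makes key rotation work: writes always go through the first key, while reads are routed by whichever `name:` prefix matches the stored bytes. A toy sketch of that prefix routing, with an identity transform standing in for the real AES transformers (all names here are illustrative, not the apiserver's types):

```go
package main

import (
	"bytes"
	"fmt"
)

// namedKey stands in for one configured AES key: a "name:" prefix plus a
// transform (identity here; the real code wraps an AES-GCM or AES-CBC cipher).
type namedKey struct {
	prefix    []byte
	transform func([]byte) []byte
}

// keyUnion mimics the NewPrefixTransformers idea: the first key encrypts all
// new writes, while reads are dispatched to whichever key's prefix matches,
// so rotating a new key into position 0 never breaks decryption of old data.
type keyUnion []namedKey

func (u keyUnion) toStorage(plaintext []byte) []byte {
	k := u[0]
	return append(append([]byte{}, k.prefix...), k.transform(plaintext)...)
}

func (u keyUnion) fromStorage(stored []byte) ([]byte, error) {
	for _, k := range u {
		if bytes.HasPrefix(stored, k.prefix) {
			return k.transform(bytes.TrimPrefix(stored, k.prefix)), nil
		}
	}
	return nil, fmt.Errorf("no matching key was found")
}

func main() {
	id := func(b []byte) []byte { return b }
	u := keyUnion{
		{prefix: []byte("key2:"), transform: id}, // new key: used for writes
		{prefix: []byte("key1:"), transform: id}, // old key: still readable
	}
	stored := u.toStorage([]byte("secret"))
	plain, err := u.fromStorage(stored)
	fmt.Println(string(stored), string(plain), err)
}
```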
// Create a PrefixTransformer which shall later be put in a list with other providers - result = value.PrefixTransformer{ + result = storagevalue.PrefixTransformer{ Transformer: keyTransformer, Prefix: []byte(prefix), } return result, nil } -func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (value.PrefixTransformer, error) { - var result value.PrefixTransformer +func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (storagevalue.PrefixTransformer, error) { + var result storagevalue.PrefixTransformer if len(config.Keys) == 0 { return result, fmt.Errorf("secretbox provider has no valid keys") @@ -603,7 +603,7 @@ func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) } } - keyTransformers := []value.PrefixTransformer{} + keyTransformers := []storagevalue.PrefixTransformer{} for _, keyData := range config.Keys { keyData := keyData @@ -621,18 +621,18 @@ func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) // Create a new PrefixTransformer for this key keyTransformers = append(keyTransformers, - value.PrefixTransformer{ + storagevalue.PrefixTransformer{ Transformer: secretbox.NewSecretboxTransformer(keyArray), Prefix: []byte(keyData.Name + ":"), }) } // Create a prefixTransformer which can choose between these keys - keyTransformer := value.NewPrefixTransformers( + keyTransformer := storagevalue.NewPrefixTransformers( fmt.Errorf("no matching key was found for the provided Secretbox transformer"), keyTransformers...) // Create a PrefixTransformer which shall later be put in a list with other providers - result = value.PrefixTransformer{ + result = storagevalue.PrefixTransformer{ Transformer: keyTransformer, Prefix: []byte(secretboxTransformerPrefixV1), } @@ -665,13 +665,13 @@ func (s *kmsState) accumulate(other *kmsState) { // kmsPrefixTransformer creates a KMS transformer and probe based on the given KMS config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
-func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (value.PrefixTransformer, healthChecker, *kmsState, error) { +func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (storagevalue.PrefixTransformer, healthChecker, *kmsState, error) { kmsName := config.Name switch config.APIVersion { case kmsAPIVersionV1: envelopeService, err := envelopeServiceFactory(ctx, config.Endpoint, config.Timeout.Duration) if err != nil { - return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv1-Plugin's probe %q, error: %w", kmsName, err) + return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv1-Plugin's probe %q, error: %w", kmsName, err) } probe := &kmsPluginProbe{ @@ -692,12 +692,12 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig case kmsAPIVersionV2: if !utilfeature.DefaultFeatureGate.Enabled(features.KMSv2) { - return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName) + return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName) } envelopeService, err := EnvelopeKMSv2ServiceFactory(ctx, config.Endpoint, config.Name, config.Timeout.Duration) if err != nil { - return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %w", kmsName, err) + return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %w", kmsName, err) } probe := &kmsv2PluginProbe{ @@ -748,7 +748,7 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig }) // using AES-GCM by default for encrypting data with KMSv2 - transformer := value.PrefixTransformer{ + transformer := storagevalue.PrefixTransformer{ Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, kmsName, probe.getCurrentState), Prefix: []byte(kmsTransformerPrefixV2 + kmsName + ":"), } @@ -759,12 +759,12 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig }, nil default: - return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMS plugin %q, unsupported KMS API version %q", kmsName, config.APIVersion) + return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMS plugin %q, unsupported KMS API version %q", kmsName, config.APIVersion) } } -func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) value.PrefixTransformer { - baseTransformerFunc := func(block cipher.Block) (value.Transformer, error) { +func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) storagevalue.PrefixTransformer { + baseTransformerFunc := func(block cipher.Block) (storagevalue.Transformer, error) { gcm, err := aestransformer.NewGCMTransformer(block) if err != nil { return nil, err @@ -777,15 +777,15 @@ func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelop return unionTransformers{gcm, aestransformer.NewCBCTransformer(block)}, nil } - return value.PrefixTransformer{ + return storagevalue.PrefixTransformer{ Transformer: envelope.NewEnvelopeTransformer(envelopeService, int(*config.CacheSize), baseTransformerFunc), Prefix: []byte(prefix + config.Name + ":"), } } -type unionTransformers []value.Transformer +type 
unionTransformers []storagevalue.Transformer -func (u unionTransformers) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) (out []byte, stale bool, err error) { +func (u unionTransformers) TransformFromStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) (out []byte, stale bool, err error) { var errs []error for i := range u { transformer := u[i] @@ -804,7 +804,7 @@ func (u unionTransformers) TransformFromStorage(ctx context.Context, data []byte return nil, false, fmt.Errorf("unionTransformers: unable to transform from storage") } -func (u unionTransformers) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) (out []byte, err error) { +func (u unionTransformers) TransformToStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) (out []byte, err error) { return u[0].TransformToStorage(ctx, data, dataCtx) } @@ -815,7 +815,7 @@ func computeEncryptionConfigHash(data []byte) string { return fmt.Sprintf("%x", sha256.Sum256(data)) } -var _ ResourceTransformers = &DynamicTransformers{} +var _ storagevalue.ResourceTransformers = &DynamicTransformers{} var _ healthz.HealthChecker = &DynamicTransformers{} // DynamicTransformers holds transformers that may be dynamically updated via a single external actor, likely a controller. @@ -825,7 +825,7 @@ type DynamicTransformers struct { } type transformTracker struct { - transformerOverrides map[schema.GroupResource]value.Transformer + transformerOverrides map[schema.GroupResource]storagevalue.Transformer kmsPluginHealthzCheck healthz.HealthChecker closeTransformers context.CancelFunc kmsCloseGracePeriod time.Duration @@ -833,7 +833,7 @@ type transformTracker struct { // NewDynamicTransformers returns transformers, health checks for kms providers and an ability to close transformers. func NewDynamicTransformers( - transformerOverrides map[schema.GroupResource]value.Transformer, + transformerOverrides map[schema.GroupResource]storagevalue.Transformer, kmsPluginHealthzCheck healthz.HealthChecker, closeTransformers context.CancelFunc, kmsCloseGracePeriod time.Duration, @@ -864,7 +864,7 @@ func (d *DynamicTransformers) Name() string { } // TransformerForResource returns the transformer for the given resource. -func (d *DynamicTransformers) TransformerForResource(resource schema.GroupResource) value.Transformer { +func (d *DynamicTransformers) TransformerForResource(resource schema.GroupResource) storagevalue.Transformer { return &resourceTransformer{ resource: resource, transformTracker: d.transformTracker, @@ -873,7 +873,7 @@ func (d *DynamicTransformers) TransformerForResource(resource schema.GroupResour // Set sets the transformer overrides. This method is not go routine safe and must only be called by the same, single caller throughout the lifetime of this object. 
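The `Set` method that follows relies on a single-writer `atomic.Value` snapshot: readers `Load()` an immutable `transformTracker` without taking any lock, and the one permitted writer swaps in a complete replacement. A stripped-down sketch of that hot-swap pattern (the types below are stand-ins, not the apiserver's):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// config is a stand-in for the transformTracker above: an immutable snapshot
// that readers load atomically while a single writer swaps in replacements.
type config struct {
	transformers map[string]string
}

type dynamic struct {
	current atomic.Value // holds *config
}

// set is single-writer by contract, mirroring DynamicTransformers.Set:
// callers must never invoke it concurrently, but readers need no lock at all.
func (d *dynamic) set(c *config) { d.current.Store(c) }

// lookup loads the latest snapshot; a reader always sees a complete config,
// never a half-updated map.
func (d *dynamic) lookup(resource string) string {
	return d.current.Load().(*config).transformers[resource]
}

func main() {
	d := &dynamic{}
	d.set(&config{transformers: map[string]string{"secrets": "kms-v2"}})
	fmt.Println(d.lookup("secrets"))
	d.set(&config{transformers: map[string]string{"secrets": "kms-v2-rotated"}})
	fmt.Println(d.lookup("secrets"))
}
```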
func (d *DynamicTransformers) Set( - transformerOverrides map[schema.GroupResource]value.Transformer, + transformerOverrides map[schema.GroupResource]storagevalue.Transformer, closeTransformers context.CancelFunc, kmsPluginHealthzCheck healthz.HealthChecker, kmsCloseGracePeriod time.Duration, @@ -898,34 +898,30 @@ func (d *DynamicTransformers) Set( }() } -var _ value.Transformer = &resourceTransformer{} +var _ storagevalue.Transformer = &resourceTransformer{} type resourceTransformer struct { resource schema.GroupResource transformTracker *atomic.Value } -func (r *resourceTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { +func (r *resourceTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) ([]byte, bool, error) { return r.transformer().TransformFromStorage(ctx, data, dataCtx) } -func (r *resourceTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { +func (r *resourceTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) ([]byte, error) { return r.transformer().TransformToStorage(ctx, data, dataCtx) } -func (r *resourceTransformer) transformer() value.Transformer { +func (r *resourceTransformer) transformer() storagevalue.Transformer { return transformerFromOverrides(r.transformTracker.Load().(*transformTracker).transformerOverrides, r.resource) } -type ResourceTransformers interface { - TransformerForResource(resource schema.GroupResource) value.Transformer -} - -var _ ResourceTransformers = &StaticTransformers{} +var _ storagevalue.ResourceTransformers = &StaticTransformers{} -type StaticTransformers map[schema.GroupResource]value.Transformer +type StaticTransformers map[schema.GroupResource]storagevalue.Transformer -func (s StaticTransformers) TransformerForResource(resource schema.GroupResource) value.Transformer { +func (s StaticTransformers) TransformerForResource(resource schema.GroupResource) storagevalue.Transformer { return transformerFromOverrides(s, resource) } @@ -934,7 +930,7 @@ var anyGroupAnyResource = schema.GroupResource{ Resource: "*", } -func transformerFromOverrides(transformerOverrides map[schema.GroupResource]value.Transformer, resource schema.GroupResource) value.Transformer { +func transformerFromOverrides(transformerOverrides map[schema.GroupResource]storagevalue.Transformer, resource schema.GroupResource) storagevalue.Transformer { if transformer := transformerOverrides[resource]; transformer != nil { return transformer } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index 6aabbf255bed..9f4402411ef8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -38,7 +38,7 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" - flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" + storagevalue "k8s.io/apiserver/pkg/storage/value" "k8s.io/klog/v2" ) @@ -64,11 +64,6 @@ type EtcdOptions struct { // WatchCacheSizes represents override to a given resource WatchCacheSizes []string - // complete guards fields that must be initialized via Complete before the Apply methods can be used. 
- complete bool - resourceTransformers encryptionconfig.ResourceTransformers - kmsPluginHealthzChecks []healthz.HealthChecker - // SkipHealthEndpoints, when true, causes the Apply methods to not set up health endpoints. // This allows multiple invocations of the Apply methods without duplication of said endpoints. SkipHealthEndpoints bool @@ -212,118 +207,114 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { "The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer.") } -// Complete must be called exactly once before using any of the Apply methods. It is responsible for setting -// up objects that must be created once and reused across multiple invocations such as storage transformers. -// This method mutates the receiver (EtcdOptions). It must never mutate the inputs. -func (s *EtcdOptions) Complete( - storageObjectCountTracker flowcontrolrequest.StorageObjectCountTracker, - stopCh <-chan struct{}, - addPostStartHook func(name string, hook server.PostStartHookFunc) error, -) error { +// ApplyTo mutates the provided server.Config. It must never mutate the receiver (EtcdOptions). +func (s *EtcdOptions) ApplyTo(c *server.Config) error { if s == nil { return nil } - if s.complete { - return fmt.Errorf("EtcdOptions.Complete called more than once") + storageConfigCopy := s.StorageConfig + if storageConfigCopy.StorageObjectCountTracker == nil { + storageConfigCopy.StorageObjectCountTracker = c.StorageObjectCountTracker } - if len(s.EncryptionProviderConfigFilepath) != 0 { - ctxServer := wait.ContextForChannel(stopCh) - // nolint:govet // The only code path where closeTransformers does not get called is when it gets stored in dynamicTransformers. - ctxTransformers, closeTransformers := context.WithCancel(ctxServer) + return s.ApplyWithStorageFactoryTo(&SimpleStorageFactory{StorageConfig: storageConfigCopy}, c) +} - encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload) - if err != nil { - // in case of error, we want to close partially initialized (if any) transformers - closeTransformers() - return err - } +// ApplyWithStorageFactoryTo mutates the provided server.Config. It must never mutate the receiver (EtcdOptions). +func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFactory, c *server.Config) error { + if s == nil { + return nil + } - // enable kms hot reload controller only if the config file is set to be automatically reloaded - if s.EncryptionProviderConfigAutomaticReload { - // with reload=true we will always have 1 health check - if len(encryptionConfiguration.HealthChecks) != 1 { - // in case of error, we want to close partially initialized (if any) transformers - closeTransformers() - return fmt.Errorf("failed to start kms encryption config hot reload controller. only 1 health check should be available when reload is enabled") - } - - // Here the dynamic transformers take ownership of the transformers and their cancellation. 
- dynamicTransformers := encryptionconfig.NewDynamicTransformers(encryptionConfiguration.Transformers, encryptionConfiguration.HealthChecks[0], closeTransformers, encryptionConfiguration.KMSCloseGracePeriod) - - // add post start hook to start hot reload controller - // adding this hook here will ensure that it gets configured exactly once - err = addPostStartHook( - "start-encryption-provider-config-automatic-reload", - func(_ server.PostStartHookContext) error { - dynamicEncryptionConfigController := encryptionconfigcontroller.NewDynamicEncryptionConfiguration( - "encryption-provider-config-automatic-reload-controller", - s.EncryptionProviderConfigFilepath, - dynamicTransformers, - encryptionConfiguration.EncryptionFileContentHash, - ) - - go dynamicEncryptionConfigController.Run(ctxServer) - - return nil - }, - ) - if err != nil { - // in case of error, we want to close partially initialized (if any) transformers - closeTransformers() - return fmt.Errorf("failed to add post start hook for kms encryption config hot reload controller: %w", err) - } - - s.resourceTransformers = dynamicTransformers - s.kmsPluginHealthzChecks = []healthz.HealthChecker{dynamicTransformers} - } else { - s.resourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers) - s.kmsPluginHealthzChecks = encryptionConfiguration.HealthChecks + if !s.SkipHealthEndpoints { + if err := s.addEtcdHealthEndpoint(c); err != nil { + return err } } - s.StorageConfig.StorageObjectCountTracker = storageObjectCountTracker - - s.complete = true + // setup encryption + if err := s.maybeApplyResourceTransformers(c); err != nil { + return err + } - // nolint:govet // The only code path where closeTransformers does not get called is when it gets stored in dynamicTransformers. + c.RESTOptionsGetter = s.CreateRESTOptionsGetter(factory, c.ResourceTransformers) return nil } -// ApplyTo mutates the provided server.Config. It must never mutate the receiver (EtcdOptions). -func (s *EtcdOptions) ApplyTo(c *server.Config) error { - if s == nil { - return nil +func (s *EtcdOptions) CreateRESTOptionsGetter(factory serverstorage.StorageFactory, resourceTransformers storagevalue.ResourceTransformers) generic.RESTOptionsGetter { + if resourceTransformers != nil { + factory = &transformerStorageFactory{ + delegate: factory, + resourceTransformers: resourceTransformers, + } } - - return s.ApplyWithStorageFactoryTo(&SimpleStorageFactory{StorageConfig: s.StorageConfig}, c) + return &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory} } -// ApplyWithStorageFactoryTo mutates the provided server.Config. It must never mutate the receiver (EtcdOptions). 
-func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFactory, c *server.Config) error { - if s == nil { +func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err error) { + if c.ResourceTransformers != nil { return nil } - - if !s.complete { - return fmt.Errorf("EtcdOptions.Apply called without completion") + if len(s.EncryptionProviderConfigFilepath) == 0 { + return nil } - if !s.SkipHealthEndpoints { - if err := s.addEtcdHealthEndpoint(c); err != nil { - return err + ctxServer := wait.ContextForChannel(c.DrainedNotify()) + ctxTransformers, closeTransformers := context.WithCancel(ctxServer) + defer func() { + // in case of error, we want to close partially initialized (if any) transformers + if err != nil { + closeTransformers() } + }() + + encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload) + if err != nil { + return err } - if s.resourceTransformers != nil { - factory = &transformerStorageFactory{ - delegate: factory, - resourceTransformers: s.resourceTransformers, + if s.EncryptionProviderConfigAutomaticReload { + // with reload=true we will always have 1 health check + if len(encryptionConfiguration.HealthChecks) != 1 { + return fmt.Errorf("failed to start kms encryption config hot reload controller. only 1 health check should be available when reload is enabled") + } + + // Here the dynamic transformers take ownership of the transformers and their cancellation. + dynamicTransformers := encryptionconfig.NewDynamicTransformers(encryptionConfiguration.Transformers, encryptionConfiguration.HealthChecks[0], closeTransformers, encryptionConfiguration.KMSCloseGracePeriod) + + // add post start hook to start hot reload controller + // adding this hook here will ensure that it gets configured exactly once + err = c.AddPostStartHook( + "start-encryption-provider-config-automatic-reload", + func(_ server.PostStartHookContext) error { + dynamicEncryptionConfigController := encryptionconfigcontroller.NewDynamicEncryptionConfiguration( + "encryption-provider-config-automatic-reload-controller", + s.EncryptionProviderConfigFilepath, + dynamicTransformers, + encryptionConfiguration.EncryptionFileContentHash, + ) + + go dynamicEncryptionConfigController.Run(ctxServer) + + return nil + }, + ) + if err != nil { + return fmt.Errorf("failed to add post start hook for kms encryption config hot reload controller: %w", err) + } + + c.ResourceTransformers = dynamicTransformers + if !s.SkipHealthEndpoints { + c.AddHealthChecks(dynamicTransformers) + } + } else { + c.ResourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers) + if !s.SkipHealthEndpoints { + c.AddHealthChecks(encryptionConfiguration.HealthChecks...) } } - c.RESTOptionsGetter = &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory} return nil } @@ -344,8 +335,6 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { return readyCheck() })) - c.AddHealthChecks(s.kmsPluginHealthzChecks...) 
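The rewritten `maybeApplyResourceTransformers` above leans on a named error return plus a deferred check, so every early failure path tears down the partially initialized transformers exactly once, while the success path hands `closeTransformers` onward to the dynamic transformers. A condensed sketch of that idiom (the `loadConfig` stand-in is hypothetical):

```go
package main

import (
	"context"
	"fmt"
)

// loadConfig is a hypothetical stand-in for the expensive, cancellable setup
// (LoadEncryptionConfig in the real code); it may fail after background
// goroutines tied to ctx have already been started.
func loadConfig(ctx context.Context) error { return fmt.Errorf("bad config") }

// setup shows the shape used above: a named error return plus one deferred
// check, instead of repeating closeTransformers() at every return site.
func setup(parent context.Context) (err error) {
	ctx, closeTransformers := context.WithCancel(parent)
	defer func() {
		// Only on failure; on success, ownership of the cancel function
		// is handed to whatever outlives this call.
		if err != nil {
			closeTransformers()
		}
	}()

	if err = loadConfig(ctx); err != nil {
		return err
	}
	// ... on success, ctx and closeTransformers deliberately outlive setup.
	return nil
}

func main() {
	fmt.Println(setup(context.Background()))
}
```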
- return nil } @@ -453,7 +442,7 @@ var _ serverstorage.StorageFactory = &transformerStorageFactory{} type transformerStorageFactory struct { delegate serverstorage.StorageFactory - resourceTransformers encryptionconfig.ResourceTransformers + resourceTransformers storagevalue.ResourceTransformers } func (t *transformerStorageFactory) NewConfig(resource schema.GroupResource) (*storagebackend.ConfigForResource, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 28aad0daf635..69f8fb51556b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -20,7 +20,6 @@ import ( "fmt" "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/features" @@ -28,6 +27,7 @@ import ( "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/component-base/featuregate" "k8s.io/klog/v2" @@ -101,9 +101,6 @@ func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) { // ApplyTo adds RecommendedOptions to the server configuration. // pluginInitializers can be empty, it is only need for additional initializers. func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { - if err := o.Etcd.Complete(config.Config.StorageObjectCountTracker, config.Config.DrainedNotify(), config.Config.AddPostStartHook); err != nil { - return err - } if err := o.Etcd.ApplyTo(&config.Config); err != nil { return err } @@ -131,9 +128,20 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if initializers, err := o.ExtraAdmissionInitializers(config); err != nil { + initializers, err := o.ExtraAdmissionInitializers(config) + if err != nil { + return err + } + kubeClient, err := kubernetes.NewForConfig(config.ClientConfig) + if err != nil { + return err + } + dynamicClient, err := dynamic.NewForConfig(config.ClientConfig) + if err != nil { return err - } else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate, initializers...); err != nil { + } + if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, kubeClient, dynamicClient, o.FeatureGate, + initializers...); err != nil { return err } if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go index 17cc1f85a09b..2819d1576016 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -43,10 +43,7 @@ func (oa OpenAPI) InstallV2(c *restful.Container, mux *mux.PathRecorderMux) (*ha } spec.Definitions = handler.PruneDefaults(spec.Definitions) openAPIVersionedService := handler.NewOpenAPIService(spec) - err = openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux) - if err != nil { - klog.Fatalf("Failed to register versioned open api spec for root: %v", err) - } + openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux) return 
openAPIVersionedService, spec } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS index c77bfe44b5d4..044ecb9f61f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/OWNERS @@ -1,11 +1,9 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - lavalamp - liggitt - wojtek-t reviewers: - - lavalamp - smarterclayton - wojtek-t - deads2k @@ -16,6 +14,8 @@ reviewers: - ingvagabund - enj - stevekuznetsov + - MadhavJivrajani emeritus_approvers: - xiang90 - timothysc + - lavalamp diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index eada35b1d0a9..5e5aa572d336 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -184,7 +184,6 @@ func (i *indexedWatchers) terminateAll(groupResource schema.GroupResource, done // second in a bucket, and pop up them once at the timeout. To be more specific, // if you set fire time at X, you can get the bookmark within (X-1,X+1) period. type watcherBookmarkTimeBuckets struct { - lock sync.Mutex // the key of watcherBuckets is the number of seconds since createTime watchersBuckets map[int64][]*cacheWatcher createTime time.Time @@ -205,7 +204,7 @@ func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) * // adds a watcher to the bucket, if the deadline is before the start, it will be // added to the first one. -func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { +func (t *watcherBookmarkTimeBuckets) addWatcherThreadUnsafe(w *cacheWatcher) bool { // note that the returned time can be before t.createTime, // especially in cases when the nextBookmarkTime method // give us the zero value of type Time @@ -215,8 +214,6 @@ func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { return false } bucketID := int64(nextTime.Sub(t.createTime) / time.Second) - t.lock.Lock() - defer t.lock.Unlock() if bucketID < t.startBucketID { bucketID = t.startBucketID } @@ -225,12 +222,10 @@ func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool { return true } -func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher { +func (t *watcherBookmarkTimeBuckets) popExpiredWatchersThreadUnsafe() [][]*cacheWatcher { currentBucketID := int64(t.clock.Since(t.createTime) / time.Second) // There should be one or two elements in almost all cases expiredWatchers := make([][]*cacheWatcher, 0, 2) - t.lock.Lock() - defer t.lock.Unlock() for ; t.startBucketID <= currentBucketID; t.startBucketID++ { if watchers, ok := t.watchersBuckets[t.startBucketID]; ok { delete(t.watchersBuckets, t.startBucketID) @@ -328,6 +323,7 @@ type Cacher struct { // dispatching that event to avoid race with closing channels in watchers. watchersToStop []*cacheWatcher // Maintain a timeout queue to send the bookmark event before the watcher times out. + // Note that this field when accessed MUST be protected by the Cacher.lock. 
bookmarkWatchers *watcherBookmarkTimeBuckets // expiredBookmarkWatchers is a list of watchers that were expired and need to be schedule for a next bookmark event expiredBookmarkWatchers []*cacheWatcher @@ -404,7 +400,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { watchCache := newWatchCache( config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource) - listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) reflectorName := "storage/cacher.go:" + config.ResourcePrefix reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) @@ -592,6 +588,18 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions identifier, ) + // note that c.waitUntilWatchCacheFreshAndForceAllEvents must be called without + // the c.watchCache.RLock held otherwise we are at risk of a deadlock + // mainly because c.watchCache.processEvent method won't be able to make progress + // + // moreover even though the c.waitUntilWatchCacheFreshAndForceAllEvents acquires a lock + // it is safe to release the lock after the method finishes because we don't require + // any atomicity between the call to the method and further calls that actually get the events. + forceAllEvents, err := c.waitUntilWatchCacheFreshAndForceAllEvents(ctx, requestedWatchRV, opts) + if err != nil { + return newErrWatcher(err), nil + } + // We explicitly use thread unsafe version and do locking ourself to ensure that // no new events will be processed in the meantime. The watchCache will be unlocked // on return from this function. @@ -599,10 +607,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions // underlying watchCache is calling processEvent under its lock. c.watchCache.RLock() defer c.watchCache.RUnlock() - forceAllEvents, err := c.waitUntilWatchCacheFreshAndForceAllEvents(ctx, requestedWatchRV, opts) - if err != nil { - return newErrWatcher(err), nil - } + startWatchRV := startWatchResourceVersionFn() var cacheInterval *watchCacheInterval if forceAllEvents { @@ -638,7 +643,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions // Add it to the queue only when the client support watch bookmarks. if watcher.allowWatchBookmarks { - c.bookmarkWatchers.addWatcher(watcher) + c.bookmarkWatchers.addWatcherThreadUnsafe(watcher) } c.watcherIdx++ }() @@ -795,24 +800,30 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio return err } span.AddEvent("Listed items from cache", attribute.Int("count", len(objs))) - if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() { - // Resize the slice appropriately, since we already know that none - // of the elements will be filtered out. - listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs))) - span.AddEvent("Resized result") - } + // store pointer of eligible objects, + // Why not directly put object in the items of listObj? + // the elements in ListObject are Struct type, making slice will bring excessive memory consumption. 
+ // so we try to delay this action as much as possible + var selectedObjects []runtime.Object for _, obj := range objs { elem, ok := obj.(*storeElement) if !ok { return fmt.Errorf("non *storeElement returned from storage: %v", obj) } if filter(elem.Key, elem.Labels, elem.Fields) { - listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem())) + selectedObjects = append(selectedObjects, elem.Object) } } - if listVal.IsNil() { + if len(selectedObjects) == 0 { // Ensure that we never return a nil Items pointer in the result for consistency. listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) + } else { + // Resize the slice appropriately, since we already know that size of result set + listVal.Set(reflect.MakeSlice(listVal.Type(), len(selectedObjects), len(selectedObjects))) + span.AddEvent("Resized result") + for i, o := range selectedObjects { + listVal.Index(i).Set(reflect.ValueOf(o).Elem()) + } } span.AddEvent("Filtered items", attribute.Int("count", listVal.Len())) if c.versioner != nil { @@ -911,9 +922,25 @@ func (c *Cacher) dispatchEvents() { bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25)) // Never send a bookmark event if we did not see an event here, this is fine // because we don't provide any guarantees on sending bookmarks. + // + // Just pop closed watchers and requeue others if needed. + // + // TODO(#115478): rework the following logic + // in a way that would allow more + // efficient cleanup of closed watchers if lastProcessedResourceVersion == 0 { - // pop expired watchers in case there has been no update - c.bookmarkWatchers.popExpiredWatchers() + func() { + c.Lock() + defer c.Unlock() + for _, watchers := range c.bookmarkWatchers.popExpiredWatchersThreadUnsafe() { + for _, watcher := range watchers { + if watcher.stopped { + continue + } + c.bookmarkWatchers.addWatcherThreadUnsafe(watcher) + } + } + }() continue } bookmarkEvent := &watchCacheEvent{ @@ -1035,7 +1062,7 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) { func (c *Cacher) startDispatchingBookmarkEventsLocked() { // Pop already expired watchers. However, explicitly ignore stopped ones, // as we don't delete watcher from bookmarkWatchers when it is stopped. - for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() { + for _, watchers := range c.bookmarkWatchers.popExpiredWatchersThreadUnsafe() { for _, watcher := range watchers { // c.Lock() is held here. // watcher.stopThreadUnsafe() is protected by c.Lock() @@ -1140,7 +1167,7 @@ func (c *Cacher) finishDispatching() { continue } // requeue the watcher for the next bookmark if needed. - c.bookmarkWatchers.addWatcher(watcher) + c.bookmarkWatchers.addWatcherThreadUnsafe(watcher) } c.expiredBookmarkWatchers = c.expiredBookmarkWatchers[:0] } @@ -1309,54 +1336,6 @@ func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, return false, nil } -// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. -type cacherListerWatcher struct { - storage storage.Interface - resourcePrefix string - newListFunc func() runtime.Object -} - -// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher. -func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { - return &cacherListerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, - } -} - -// Implements cache.ListerWatcher interface. 
-func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) { - list := lw.newListFunc() - pred := storage.SelectionPredicate{ - Label: labels.Everything(), - Field: fields.Everything(), - Limit: options.Limit, - Continue: options.Continue, - } - - storageOpts := storage.ListOptions{ - ResourceVersionMatch: options.ResourceVersionMatch, - Predicate: pred, - Recursive: true, - } - if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil { - return nil, err - } - return list, nil -} - -// Implements cache.ListerWatcher interface. -func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { - opts := storage.ListOptions{ - ResourceVersion: options.ResourceVersion, - Predicate: storage.Everything, - Recursive: true, - ProgressNotify: true, - } - return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts) -} - // errWatcher implements watch.Interface to return a single error type errWatcher struct { result chan watch.Event diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go index 258efed84258..e2e2aa5e79d0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go @@ -148,6 +148,10 @@ func (o *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.O if result.err != nil { return result.err } + if b, support := w.(runtime.Splice); support { + b.Splice(result.raw) + return nil + } _, err := w.Write(result.raw) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go new file mode 100644 index 000000000000..1252e5e34959 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go @@ -0,0 +1,77 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cacher + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/storage" + "k8s.io/client-go/tools/cache" +) + +// listerWatcher opaques storage.Interface to expose cache.ListerWatcher. +type listerWatcher struct { + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object +} + +// NewListerWatcher returns a storage.Interface backed ListerWatcher. +func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { + return &listerWatcher{ + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + } +} + +// Implements cache.ListerWatcher interface. 
+func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error) { + list := lw.newListFunc() + pred := storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: options.Limit, + Continue: options.Continue, + } + + storageOpts := storage.ListOptions{ + ResourceVersionMatch: options.ResourceVersionMatch, + Predicate: pred, + Recursive: true, + } + if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil { + return nil, err + } + return list, nil +} + +// Implements cache.ListerWatcher interface. +func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { + opts := storage.ListOptions{ + ResourceVersion: options.ResourceVersion, + Predicate: storage.Everything, + Recursive: true, + ProgressNotify: true, + } + return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index 4d86018e5208..455a225708ad 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -608,8 +608,8 @@ func (w *watchCache) Resync() error { } func (w *watchCache) currentCapacity() int { - w.Lock() - defer w.Unlock() + w.RLock() + defer w.RUnlock() return w.capacity } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index 6f155c0adb29..78c9a5983882 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -47,6 +47,22 @@ var ( }, []string{"operation", "type"}, ) + etcdRequestCounts = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "etcd_requests_total", + Help: "Etcd request counts for each operation and object type.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"operation", "type"}, + ) + etcdRequestErrorCounts = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "etcd_request_errors_total", + Help: "Etcd failed request counts for each operation and object type.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"operation", "type"}, + ) objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_storage_objects", @@ -140,6 +156,8 @@ func Register() { // Register the metrics. registerMetrics.Do(func() { legacyregistry.MustRegister(etcdRequestLatency) + legacyregistry.MustRegister(etcdRequestCounts) + legacyregistry.MustRegister(etcdRequestErrorCounts) legacyregistry.MustRegister(objectCounts) legacyregistry.MustRegister(dbTotalSize) legacyregistry.MustRegister(etcdBookmarkCounts) @@ -157,9 +175,15 @@ func UpdateObjectCount(resourcePrefix string, count int64) { objectCounts.WithLabelValues(resourcePrefix).Set(float64(count)) } -// RecordEtcdRequestLatency sets the etcd_request_duration_seconds metrics. -func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { - etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime)) +// RecordEtcdRequest updates and sets the etcd_request_duration_seconds, +// etcd_request_total, etcd_request_errors_total metrics. 
+func RecordEtcdRequest(verb, resource string, err error, startTime time.Time) { + v := []string{verb, resource} + etcdRequestLatency.WithLabelValues(v...).Observe(sinceInSeconds(startTime)) + etcdRequestCounts.WithLabelValues(v...).Inc() + if err != nil { + etcdRequestErrorCounts.WithLabelValues(v...).Inc() + } } // RecordEtcdEvent updated the etcd_events_received_total metric. @@ -183,7 +207,9 @@ func Reset() { } // sinceInSeconds gets the time since the specified start in seconds. -func sinceInSeconds(start time.Time) float64 { +// +// This is a variable to facilitate testing. +var sinceInSeconds = func(start time.Time) float64 { return time.Since(start).Seconds() } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 2fc237de3314..aab390544774 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -136,7 +136,7 @@ func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, ou } startTime := time.Now() getResp, err := s.client.KV.Get(ctx, preparedKey) - metrics.RecordEtcdRequestLatency("get", s.groupResourceString, startTime) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) if err != nil { return err } @@ -210,7 +210,7 @@ func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ).Then( clientv3.OpPut(preparedKey, string(newData), opts...), ).Commit() - metrics.RecordEtcdRequestLatency("create", s.groupResourceString, startTime) + metrics.RecordEtcdRequest("create", s.groupResourceString, err, startTime) if err != nil { span.AddEvent("Txn call failed", attribute.String("err", err.Error())) return err @@ -255,7 +255,7 @@ func (s *store) conditionalDelete( getCurrentState := func() (*objState, error) { startTime := time.Now() getResp, err := s.client.KV.Get(ctx, key) - metrics.RecordEtcdRequestLatency("get", s.groupResourceString, startTime) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) if err != nil { return nil, err } @@ -337,7 +337,7 @@ func (s *store) conditionalDelete( ).Else( clientv3.OpGet(key), ).Commit() - metrics.RecordEtcdRequestLatency("delete", s.groupResourceString, startTime) + metrics.RecordEtcdRequest("delete", s.groupResourceString, err, startTime) if err != nil { return err } @@ -391,7 +391,7 @@ func (s *store) GuaranteedUpdate( getCurrentState := func() (*objState, error) { startTime := time.Now() getResp, err := s.client.KV.Get(ctx, preparedKey) - metrics.RecordEtcdRequestLatency("get", s.groupResourceString, startTime) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) if err != nil { return nil, err } @@ -512,7 +512,7 @@ func (s *store) GuaranteedUpdate( ).Else( clientv3.OpGet(preparedKey), ).Commit() - metrics.RecordEtcdRequestLatency("update", s.groupResourceString, startTime) + metrics.RecordEtcdRequest("update", s.groupResourceString, err, startTime) if err != nil { span.AddEvent("Txn call failed", attribute.String("err", err.Error())) return err @@ -575,7 +575,7 @@ func (s *store) Count(key string) (int64, error) { startTime := time.Now() getResp, err := s.client.KV.Get(context.Background(), preparedKey, clientv3.WithRange(clientv3.GetPrefixRangeEnd(preparedKey)), clientv3.WithCountOnly()) - metrics.RecordEtcdRequestLatency("listWithCount", preparedKey, startTime) + metrics.RecordEtcdRequest("listWithCount", preparedKey, err, 
startTime) if err != nil { return 0, err } @@ -720,14 +720,16 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption numReturn := v.Len() metrics.RecordStorageListMetrics(s.groupResourceString, numFetched, numEvald, numReturn) }() + + metricsOp := "get" + if recursive { + metricsOp = "list" + } + for { startTime := time.Now() getResp, err = s.client.KV.Get(ctx, preparedKey, options...) - if recursive { - metrics.RecordEtcdRequestLatency("list", s.groupResourceString, startTime) - } else { - metrics.RecordEtcdRequestLatency("get", s.groupResourceString, startTime) - } + metrics.RecordEtcdRequest(metricsOp, s.groupResourceString, err, startTime) if err != nil { return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) } @@ -863,8 +865,12 @@ func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { } // Watch implements storage.Interface.Watch. +// TODO(#115478): In order to graduate the WatchList feature to beta, the etcd3 implementation must/should also support it. func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { - if opts.SendInitialEvents != nil { + // it is safe to skip SendInitialEvents if the request is backward compatible + // see https://github.com/kubernetes/kubernetes/blob/267eb25e60955fe8e438c6311412e7cf7d028acb/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go#L260 + compatibility := opts.Predicate.AllowWatchBookmarks == false && (opts.ResourceVersion == "" || opts.ResourceVersion == "0") + if opts.SendInitialEvents != nil && !compatibility { return nil, apierrors.NewInvalid( schema.GroupKind{Group: s.groupResource.Group, Kind: s.groupResource.Resource}, "", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS index c29de755d0b8..7b8dfb623fc9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS @@ -1,6 +1,5 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - lavalamp - smarterclayton - wojtek-t diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go index 3c1fbbf8a362..fa4be57ab147 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go @@ -50,8 +50,10 @@ type simpleCache struct { } func newSimpleCache(clock clock.Clock, ttl time.Duration) *simpleCache { + cache := utilcache.NewExpiringWithClock(clock) + cache.AllowExpiredGet = true // for a given key, the value (the decryptTransformer) is always the same return &simpleCache{ - cache: utilcache.NewExpiringWithClock(clock), + cache: cache, ttl: ttl, hashPool: &sync.Pool{ New: func() interface{} { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go index 43ba22d65e0c..e372e62de7ce 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go @@ -265,6 +265,8 @@ func (t 
*envelopeTransformer) doDecode(originalData []byte) (*kmstypes.Encrypted
 	return o, nil
 }
 
+// GenerateTransformer generates a new transformer and encrypts the DEK using the envelope service.
+// It returns the transformer, the encrypted DEK, the cache key, and an error.
 func GenerateTransformer(ctx context.Context, uid string, envelopeService kmsservice.Service) (value.Transformer, *kmsservice.EncryptResponse, []byte, error) {
 	transformer, newKey, err := aestransformer.NewGCMTransformerWithUniqueKeyUnsafe()
 	if err != nil {
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go
index c8fd2f4c04d0..9b29b1bd6711 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/metrics.go
@@ -17,9 +17,11 @@ limitations under the License.
 package value
 
 import (
+	"errors"
 	"sync"
 	"time"
 
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 
 	"k8s.io/component-base/metrics"
@@ -112,7 +114,7 @@ func RegisterMetrics() {
 // RecordTransformation records latencies and count of TransformFromStorage and TransformToStorage operations.
 // Note that transformation_failures_total metric is deprecated, use transformation_operations_total instead.
 func RecordTransformation(transformationType, transformerPrefix string, elapsed time.Duration, err error) {
-	transformerOperationsTotal.WithLabelValues(transformationType, transformerPrefix, status.Code(err).String()).Inc()
+	transformerOperationsTotal.WithLabelValues(transformationType, transformerPrefix, getErrorCode(err)).Inc()
 
 	if err == nil {
 		transformerLatencies.WithLabelValues(transformationType, transformerPrefix).Observe(elapsed.Seconds())
@@ -138,3 +140,23 @@ func RecordDataKeyGeneration(start time.Time, err error) {
 func sinceInSeconds(start time.Time) float64 {
 	return time.Since(start).Seconds()
 }
+
+type gRPCError interface {
+	GRPCStatus() *status.Status
+}
+
+func getErrorCode(err error) string {
+	if err == nil {
+		return codes.OK.String()
+	}
+
+	// handle errors wrapped with fmt.Errorf and similar
+	var s gRPCError
+	if errors.As(err, &s) {
+		return s.GRPCStatus().Code().String()
+	}
+
+	// This is not a gRPC error. The operation must have failed before the
+	// gRPC method was called; otherwise we would get a gRPC error.
+	return "unknown-non-grpc"
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go
index a6a4aa184d61..cb48ed50c71c 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/value/transformer.go
@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"time"
 
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/errors"
 )
 
@@ -50,6 +51,11 @@ type Transformer interface {
 	TransformToStorage(ctx context.Context, data []byte, dataCtx Context) (out []byte, err error)
 }
 
+// ResourceTransformers returns a transformer for the provided resource.
+type ResourceTransformers interface {
+	TransformerForResource(resource schema.GroupResource) Transformer
+}
+
 // DefaultContext is a simple implementation of Context for a slice of bytes.
 type DefaultContext []byte
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/OWNERS
index 2556c589fd12..fd722b2acf5e 100644
--- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/OWNERS
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/OWNERS
@@ -1,15 +1,15 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-  - lavalamp
   - deads2k
   - yue9944882
   - MikeSpreitzer
 reviewers:
-  - lavalamp
   - deads2k
   - yue9944882
   - MikeSpreitzer
 labels:
   - sig/api-machinery
   - area/apiserver
+emeritus_approvers:
+  - lavalamp
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/dropped_requests_tracker.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/dropped_requests_tracker.go
new file mode 100644
index 000000000000..74bf9eece630
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/dropped_requests_tracker.go
@@ -0,0 +1,234 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+const (
+	// maxRetryAfter represents the maximum possible retryAfter.
+	maxRetryAfter = int64(32)
+)
+
+// DroppedRequestsTracker is an interface that allows tracking
+// a history of dropped requests in the system for the purpose
+// of adjusting the RetryAfter header to avoid system overload.
+type DroppedRequestsTracker interface {
+	// RecordDroppedRequest records a request that was just
+	// dropped from processing.
+	RecordDroppedRequest(plName string)
+
+	// GetRetryAfter returns the currently suggested RetryAfter value.
+	GetRetryAfter(plName string) int64
+}
+
+// unixStat keeps a statistic of how many requests were dropped within
+// a single second.
+type unixStat struct {
+	unixTime int64
+	requests int64
+}
+
+type droppedRequestsStats struct {
+	lock sync.RWMutex
+
+	// history stores the history of dropped requests.
+	history []unixStat
+
+	// To reduce lock contention, we store the information about
+	// the current second here, which we can then access under
+	// reader lock.
+	currentUnix  int64
+	currentCount atomic.Int64
+
+	retryAfter           atomic.Int64
+	retryAfterUpdateUnix int64
+}
+
+func newDroppedRequestsStats(nowUnix int64) *droppedRequestsStats {
+	result := &droppedRequestsStats{
+		// We assume that we can bump at any time after first dropped request.
+		retryAfterUpdateUnix: 0,
+	}
+	result.retryAfter.Store(1)
+	return result
+}
+
+func (s *droppedRequestsStats) recordDroppedRequest(unixTime int64) {
+	// Short path - if the current second matches passed time,
+	// just update the stats.
+	if done := func() bool {
+		s.lock.RLock()
+		defer s.lock.RUnlock()
+		if s.currentUnix == unixTime {
+			s.currentCount.Add(1)
+			return true
+		}
+		return false
+	}(); done {
+		return
+	}
+
+	// We trigger the change of currentUnix.
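+	// Slow path: take the writer lock, fold the just-finished second into
+	// history, and reconsider retryAfter for the new second.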
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	if s.currentUnix == unixTime {
+		s.currentCount.Add(1)
+		return
+	}
+
+	s.updateHistory(s.currentUnix, s.currentCount.Load())
+	s.currentUnix = unixTime
+	s.currentCount.Store(1)
+
+	// We only consider updating retryAfter when bumping the current second.
+	// However, given that we didn't report anything for the current second,
+	// we recompute it based on statistics from the previous one.
+	s.updateRetryAfterIfNeededLocked(unixTime)
+}
+
+func (s *droppedRequestsStats) updateHistory(unixTime int64, count int64) {
+	s.history = append(s.history, unixStat{unixTime: unixTime, requests: count})
+
+	startIndex := 0
+	// Entries that exceed 2*retryAfter or maxRetryAfter are never going to be needed.
+	maxHistory := 2 * s.retryAfter.Load()
+	if maxHistory > maxRetryAfter {
+		maxHistory = maxRetryAfter
+	}
+	for ; startIndex < len(s.history) && unixTime-s.history[startIndex].unixTime > maxHistory; startIndex++ {
+	}
+	if startIndex > 0 {
+		s.history = s.history[startIndex:]
+	}
+}
+
+// updateRetryAfterIfNeededLocked updates the retryAfter based on the number of
+// dropped requests in the last `retryAfter` seconds:
+//   - if there were fewer than `retryAfter` dropped requests, it decreases
+//     retryAfter
+//   - if there were at least 3*`retryAfter` dropped requests, it increases
+//     retryAfter
+//
+// The rationale behind these numbers being fairly low is that APF is queuing
+// requests and rejecting (dropping) them is a last resort, which is not expected
+// unless a given priority level is actually overloaded.
+//
+// Additionally, we rate-limit the increases of retryAfter to wait at least
+// `retryAfter` seconds after the previous increase to avoid multiple bumps
+// on a single spike.
+//
+// We're working with the interval [unixTime-retryAfter, unixTime).
+func (s *droppedRequestsStats) updateRetryAfterIfNeededLocked(unixTime int64) {
+	retryAfter := s.retryAfter.Load()
+
+	droppedRequests := int64(0)
+	for i := len(s.history) - 1; i >= 0; i-- {
+		if unixTime-s.history[i].unixTime > retryAfter {
+			break
+		}
+		if s.history[i].unixTime < unixTime {
+			droppedRequests += s.history[i].requests
+		}
+	}
+
+	if unixTime-s.retryAfterUpdateUnix >= retryAfter && droppedRequests >= 3*retryAfter {
+		// We try to mimic the TCP algorithm and thus are doubling
+		// the retryAfter here.
+		retryAfter *= 2
+		if retryAfter >= maxRetryAfter {
+			retryAfter = maxRetryAfter
+		}
+		s.retryAfter.Store(retryAfter)
+		s.retryAfterUpdateUnix = unixTime
+		return
+	}
+
+	if droppedRequests < retryAfter && retryAfter > 1 {
+		// We try to mimic the TCP algorithm and thus are linearly
+		// scaling down the retryAfter here.
+		retryAfter--
+		s.retryAfter.Store(retryAfter)
+		return
+	}
+}
+
+// droppedRequestsTracker implements the DroppedRequestsTracker interface
+// for the purpose of adjusting the RetryAfter header for newly dropped
+// requests to avoid system overload.
+type droppedRequestsTracker struct {
+	now func() time.Time
+
+	lock    sync.RWMutex
+	plStats map[string]*droppedRequestsStats
+}
+
+// NewDroppedRequestsTracker creates a new instance of
+// DroppedRequestsTracker.
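+//
+// Typical use: call RecordDroppedRequest(plName) for every request rejected
+// by a priority level, and surface GetRetryAfter(plName) as the Retry-After
+// header on the rejection response.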
+func NewDroppedRequestsTracker() DroppedRequestsTracker { + return newDroppedRequestsTracker(clock.RealClock{}.Now) +} + +func newDroppedRequestsTracker(now func() time.Time) *droppedRequestsTracker { + return &droppedRequestsTracker{ + now: now, + plStats: make(map[string]*droppedRequestsStats), + } +} + +func (t *droppedRequestsTracker) RecordDroppedRequest(plName string) { + unixTime := t.now().Unix() + + stats := func() *droppedRequestsStats { + // The list of priority levels should change very infrequently, + // so in almost all cases, the fast path should be enough. + t.lock.RLock() + if plStats, ok := t.plStats[plName]; ok { + t.lock.RUnlock() + return plStats + } + t.lock.RUnlock() + + // Slow path taking writer lock to update the map. + t.lock.Lock() + defer t.lock.Unlock() + if plStats, ok := t.plStats[plName]; ok { + return plStats + } + stats := newDroppedRequestsStats(unixTime) + t.plStats[plName] = stats + return stats + }() + + stats.recordDroppedRequest(unixTime) +} + +func (t *droppedRequestsTracker) GetRetryAfter(plName string) int64 { + t.lock.RLock() + defer t.lock.RUnlock() + + if plStats, ok := t.plStats[plName]; ok { + return plStats.retryAfter.Load() + } + return 1 +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go index e3a40174524a..05dab65bdd33 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/seat_seconds.go @@ -38,7 +38,7 @@ const MinSeatSeconds = SeatSeconds(0) // This is intended only to produce small values, increments in work // rather than amount of work done since process start. func SeatsTimesDuration(seats float64, duration time.Duration) SeatSeconds { - return SeatSeconds(math.Round(seats * float64(duration/time.Nanosecond) / (1e9 / ssScale))) + return SeatSeconds(int64(math.Round(seats * float64(duration/time.Nanosecond) / (1e9 / ssScale)))) } // ToFloat converts to a floating-point representation. diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go index a69506de6902..95e4060bd119 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -243,6 +243,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf if len(configAuthInfo.Impersonate) > 0 { config.Impersonate = rest.ImpersonationConfig{ UserName: configAuthInfo.Impersonate, + UID: configAuthInfo.ImpersonateUID, Groups: configAuthInfo.ImpersonateGroups, Extra: configAuthInfo.ImpersonateUserExtra, } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index 45143bf6efb0..b03640ae8df1 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -62,7 +62,7 @@ type GenericWebhook struct { // Otherwise it returns false for an immediate fail. func DefaultShouldRetry(err error) bool { // these errors indicate a transient error that should be retried. 
- if utilnet.IsConnectionReset(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { + if utilnet.IsConnectionReset(err) || utilnet.IsHTTP2ConnectionLost(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { return true } // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 81c84d2d46f0..27ea5d9dde94 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -32,8 +32,7 @@ import ( type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` - Status *NetworkPolicyStatusApplyConfiguration `json:"status,omitempty"` + Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } // NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with @@ -248,11 +247,3 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithStatus(value *NetworkPolicyStatusApplyConfiguration) *NetworkPolicyApplyConfiguration { - b.Status = value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go deleted file mode 100644 index 99c89b09b092..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// NetworkPolicyStatusApplyConfiguration represents an declarative configuration of the NetworkPolicyStatus type for use -// with apply. -type NetworkPolicyStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` -} - -// NetworkPolicyStatusApplyConfiguration constructs an declarative configuration of the NetworkPolicyStatus type for use with -// apply. 
-func NetworkPolicyStatus() *NetworkPolicyStatusApplyConfiguration { - return &NetworkPolicyStatusApplyConfiguration{} -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *NetworkPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NetworkPolicyStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithConditions") - } - b.Conditions = append(b.Conditions, *values[i]) - } - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 361b2f4e8555..fdb715d22bfc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -8343,10 +8343,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.extensions.v1beta1.NetworkPolicySpec default: {} - - name: status - type: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyStatus - default: {} - name: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule map: fields: @@ -8426,17 +8422,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.NetworkPolicyStatus - map: - fields: - - name: conditions - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - elementRelationship: associative - keys: - - type - name: io.k8s.api.extensions.v1beta1.ReplicaSet map: fields: @@ -10087,10 +10072,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.networking.v1.NetworkPolicySpec default: {} - - name: status - type: - namedType: io.k8s.api.networking.v1.NetworkPolicyStatus - default: {} - name: io.k8s.api.networking.v1.NetworkPolicyEgressRule map: fields: @@ -10170,17 +10151,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.networking.v1.NetworkPolicyStatus - map: - fields: - - name: conditions - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - elementRelationship: associative - keys: - - type - name: io.k8s.api.networking.v1.ServiceBackendPort map: fields: diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 101510e45f80..409507310b0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -32,8 +32,7 @@ import ( type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` - Status *NetworkPolicyStatusApplyConfiguration `json:"status,omitempty"` + Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } // NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with @@ -248,11 
+247,3 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithStatus(value *NetworkPolicyStatusApplyConfiguration) *NetworkPolicyApplyConfiguration { - b.Status = value - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go deleted file mode 100644 index 032de18eda4e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// NetworkPolicyStatusApplyConfiguration represents an declarative configuration of the NetworkPolicyStatus type for use -// with apply. -type NetworkPolicyStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` -} - -// NetworkPolicyStatusApplyConfiguration constructs an declarative configuration of the NetworkPolicyStatus type for use with -// apply. -func NetworkPolicyStatus() *NetworkPolicyStatusApplyConfiguration { - return &NetworkPolicyStatusApplyConfiguration{} -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. 
-func (b *NetworkPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NetworkPolicyStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithConditions") - } - b.Conditions = append(b.Conditions, *values[i]) - } - return b -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go index 9143ce00ab42..3829b3cc09c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go @@ -22,7 +22,7 @@ import ( "sync" "syscall" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" errorsutil "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go index 641568008b7b..a4f083a1ac3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "mime" "net/http" "net/url" "sort" @@ -29,7 +30,7 @@ import ( //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs "github.com/golang/protobuf/proto" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" apidiscovery "k8s.io/api/apidiscovery/v2beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -58,8 +59,9 @@ const ( defaultBurst = 300 AcceptV1 = runtime.ContentTypeJSON - // Aggregated discovery content-type (currently v2beta1). NOTE: Currently, we are assuming the order - // for "g", "v", and "as" from the server. We can only compare this string if we can make that assumption. + // Aggregated discovery content-type (v2beta1). NOTE: content-type parameters + // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient + // to ordering when comparing returned values in "Content-Type" header). AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" // Prioritize aggregated discovery by placing first in the order of discovery accept types. acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 @@ -259,8 +261,16 @@ func (d *DiscoveryClient) downloadLegacy() ( var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Switch on content-type server responded with: aggregated or unaggregated. - switch responseContentType { - case AcceptV1: + switch { + case isV2Beta1ContentType(responseContentType): + var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + default: + // Default is unaggregated discovery v1. 
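+		// Anything that does not advertise the aggregated content type falls
+		// through here and is parsed as a legacy metav1.APIVersions response.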
 			var v metav1.APIVersions
 			err = json.Unmarshal(body, &v)
 			if err != nil {
@@ -271,15 +281,6 @@ func (d *DiscoveryClient) downloadLegacy() (
 			apiGroup = apiVersionsToAPIGroup(&v)
 		}
 		apiGroupList.Groups = []metav1.APIGroup{apiGroup}
-	case AcceptV2Beta1:
-		var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList
-		err = json.Unmarshal(body, &aggregatedDiscovery)
-		if err != nil {
-			return nil, nil, nil, err
-		}
-		apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery)
-	default:
-		return nil, nil, nil, fmt.Errorf("Unknown discovery response content-type: %s", responseContentType)
 	}
 
 	return apiGroupList, resourcesByGV, failedGVs, nil
@@ -313,13 +314,8 @@ func (d *DiscoveryClient) downloadAPIs() (
 	failedGVs := map[schema.GroupVersion]error{}
 	var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList
 	// Switch on content-type server responded with: aggregated or unaggregated.
-	switch responseContentType {
-	case AcceptV1:
-		err = json.Unmarshal(body, apiGroupList)
-		if err != nil {
-			return nil, nil, nil, err
-		}
-	case AcceptV2Beta1:
+	switch {
+	case isV2Beta1ContentType(responseContentType):
 		var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList
 		err = json.Unmarshal(body, &aggregatedDiscovery)
 		if err != nil {
@@ -327,12 +323,38 @@ func (d *DiscoveryClient) downloadAPIs() (
 		}
 		apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery)
 	default:
-		return nil, nil, nil, fmt.Errorf("Unknown discovery response content-type: %s", responseContentType)
+		// Default is unaggregated discovery v1.
+		err = json.Unmarshal(body, apiGroupList)
+		if err != nil {
+			return nil, nil, nil, err
+		}
 	}
 
 	return apiGroupList, resourcesByGV, failedGVs, nil
 }
 
+// isV2Beta1ContentType checks if the content-type string is both
+// "application/json" and contains the v2beta1 content-type params.
+// NOTE: This function is resilient to the ordering of the
+// content-type parameters, as well as parameters added by
+// intermediaries such as proxies or gateways. Examples:
+//
+//	"application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" = true
+//	"application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io" = true
+//	"application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" = true
+//	"application/json" = false
+//	"application/json; charset=UTF-8" = false
+func isV2Beta1ContentType(contentType string) bool {
+	base, params, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return false
+	}
+	return runtime.ContentTypeJSON == base &&
+		params["g"] == "apidiscovery.k8s.io" &&
+		params["v"] == "v2beta1" &&
+		params["as"] == "APIGroupDiscoveryList"
+}
+
 // ServerGroups returns the supported groups, with information like supported versions and the
 // preferred version.
func (d *DiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go index d234db893ddd..f8a78e1ef43e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -20,7 +20,7 @@ import ( "fmt" "net/http" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go index 8e7a7e36de28..7dd0ae6353cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go @@ -184,7 +184,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -257,7 +257,7 @@ type SharedInformerFactory interface { // ForResource gives generic access to a shared informer of the matching type. ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - // InternalInformerFor returns the SharedIndexInformer for obj using an internal + // InformerFor returns the SharedIndexInformer for obj using an internal // client. 
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index aea9d0e133e5..81be8b2e0468 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1Client struct { restClient rest.Interface } +func (c *AuthenticationV1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go index ee06a6cdd6c2..865239ff6459 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go @@ -28,6 +28,10 @@ type FakeAuthenticationV1 struct { *testing.Fake } +func (c *FakeAuthenticationV1) SelfSubjectReviews() v1.SelfSubjectReviewInterface { + return &FakeSelfSubjectReviews{c} +} + func (c *FakeAuthenticationV1) TokenReviews() v1.TokenReviewInterface { return &FakeTokenReviews{c} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go new file mode 100644 index 000000000000..e683b3eaaa01 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testing "k8s.io/client-go/testing" +) + +// FakeSelfSubjectReviews implements SelfSubjectReviewInterface +type FakeSelfSubjectReviews struct { + Fake *FakeAuthenticationV1 +} + +var selfsubjectreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectreviews") + +var selfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectReview") + +// Create takes the representation of a selfSubjectReview and creates it. 
Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1.SelfSubjectReview{}) + if obj == nil { + return nil, err + } + return obj.(*v1.SelfSubjectReview), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go index 0413fb2b6658..35f2c22b4f16 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go new file mode 100644 index 000000000000..bfb9603d672d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. +type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. 
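+//
+// Illustrative call (the clientset variable is hypothetical):
+//
+//	review, err := clientset.AuthenticationV1().SelfSubjectReviews().Create(
+//		ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{})
+//	// On success, review.Status.UserInfo describes the authenticated user.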
+func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { + result = &v1.SelfSubjectReview{} + err = c.client.Post(). + Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index 7bf8c6972aa8..a32022140a93 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -104,18 +104,6 @@ func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1 return obj.(*v1beta1.NetworkPolicy), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNetworkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(networkpoliciesResource, "status", c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. @@ -164,26 +152,3 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *extensio } return obj.(*v1beta1.NetworkPolicy), err } - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNetworkPolicies) ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index f24099b90d37..978b26db0337 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -43,7 +43,6 @@ type NetworkPoliciesGetter interface { type NetworkPolicyInterface interface { Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) - UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error) @@ -51,7 +50,6 @@ type NetworkPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) - ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -141,22 +139,6 @@ func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.Net return } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *networkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). @@ -224,33 +206,3 @@ func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1 Into(result) return } - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *networkPolicies) ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index be7413cb8fe6..dde09774c4a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -104,18 +104,6 @@ func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.Netw return obj.(*v1.NetworkPolicy), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNetworkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(networkpoliciesResource, "status", c.ns, networkPolicy), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} - // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. @@ -164,26 +152,3 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networki } return obj.(*v1.NetworkPolicy), err } - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNetworkPolicies) ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 97afd6278667..d7454ce14525 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -43,7 +43,6 @@ type NetworkPoliciesGetter interface { type NetworkPolicyInterface interface { Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) - UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) @@ -51,7 +50,6 @@ type NetworkPolicyInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) - ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) NetworkPolicyExpansion } @@ -141,22 +139,6 @@ func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkP return } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *networkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). @@ -224,33 +206,3 @@ func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1 Into(result) return } - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *networkPolicies) ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - - result = &v1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go index 7b58762acfd5..6a43057187e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go @@ -19,6 +19,7 @@ package openapi import ( "context" "encoding/json" + "strings" "k8s.io/client-go/rest" "k8s.io/kube-openapi/pkg/handler3" @@ -58,7 +59,11 @@ func (c *client) Paths() (map[string]GroupVersion, error) { // Create GroupVersions for each element of the result result := map[string]GroupVersion{} for k, v := range discoMap.Paths { - result[k] = newGroupVersion(c, v) + // If the server returned a URL rooted at /openapi/v3, preserve any additional client-side prefix. + // If the server returned a URL not rooted at /openapi/v3, treat it as an actual server-relative URL. + // See https://github.com/kubernetes/kubernetes/issues/117463 for details + useClientPrefix := strings.HasPrefix(v.ServerRelativeURL, "/openapi/v3") + result[k] = newGroupVersion(c, v, useClientPrefix) } return result, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go index 32133a29b8a4..601dcbe3ccb2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go @@ -18,6 +18,7 @@ package openapi import ( "context" + "net/url" "k8s.io/kube-openapi/pkg/handler3" ) @@ -29,18 +30,41 @@ type GroupVersion interface { } type groupversion struct { - client *client - item handler3.OpenAPIV3DiscoveryGroupVersion + client *client + item handler3.OpenAPIV3DiscoveryGroupVersion + useClientPrefix bool } -func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion) *groupversion { - return &groupversion{client: client, item: item} +func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion, useClientPrefix bool) *groupversion { + return &groupversion{client: client, item: item, useClientPrefix: useClientPrefix} } func (g *groupversion) Schema(contentType string) ([]byte, error) { - return g.client.restClient.Get(). - RequestURI(g.item.ServerRelativeURL). - SetHeader("Accept", contentType). - Do(context.TODO()). - Raw() + if !g.useClientPrefix { + return g.client.restClient.Get(). + RequestURI(g.item.ServerRelativeURL). + SetHeader("Accept", contentType). + Do(context.TODO()). + Raw() + } + + locator, err := url.Parse(g.item.ServerRelativeURL) + if err != nil { + return nil, err + } + + path := g.client.restClient.Get(). + AbsPath(locator.Path). 
+ SetHeader("Accept", contentType) + + // Other than root endpoints(openapiv3/apis), resources have hash query parameter to support etags. + // However, absPath does not support handling query parameters internally, + // so that hash query parameter is added manually + for k, value := range locator.Query() { + for _, v := range value { + path.Param(k, v) + } + } + + return path.Do(context.TODO()).Raw() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/typeconverter.go b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/typeconverter.go new file mode 100644 index 000000000000..4b91e66d4516 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/typeconverter.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +func NewTypeConverter(client Client, preserveUnknownFields bool) (managedfields.TypeConverter, error) { + spec := map[string]*spec.Schema{} + paths, err := client.Paths() + if err != nil { + return nil, fmt.Errorf("failed to list paths: %w", err) + } + for _, gv := range paths { + s, err := gv.Schema("application/json") + if err != nil { + return nil, fmt.Errorf("failed to download schema: %w", err) + } + var openapi spec3.OpenAPI + if err := json.Unmarshal(s, &openapi); err != nil { + return nil, fmt.Errorf("failed to parse schema: %w", err) + } + for k, v := range openapi.Components.Schemas { + spec[k] = v + } + } + return managedfields.NewTypeConverter(spec, preserveUnknownFields) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 5331b237a70b..b471f5cc6486 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -32,12 +32,12 @@ import ( "sync" "time" - "github.com/davecgh/go-spew/spew" "golang.org/x/term" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/dump" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/install" @@ -81,8 +81,6 @@ func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } -var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} - func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { key := struct { conf *api.ExecConfig @@ -91,7 +89,7 @@ func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) strin conf: conf, cluster: cluster, } - return spewConfig.Sprint(key) + return dump.Pretty(key) } type cache struct { diff --git 
a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS index 726205b3dff3..921ac2fa02b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -2,7 +2,6 @@ approvers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k @@ -11,7 +10,6 @@ approvers: - ncdc reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k @@ -26,3 +24,5 @@ reviewers: - dims - ingvagabund - ncdc +emeritus_approvers: + - lavalamp diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go index f437f286166f..8a1104bde808 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/controller.go @@ -18,7 +18,6 @@ package cache import ( "errors" - "os" "sync" "time" @@ -148,9 +147,6 @@ func (c *controller) Run(stopCh <-chan struct{}) { if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } - if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { - r.UseWatchList = true - } c.reflectorMutex.Lock() c.reflector = r diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/object-names.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/object-names.go new file mode 100644 index 000000000000..aa8dbb199373 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/object-names.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/apimachinery/pkg/types" +) + +// ObjectName is a reference to an object of some implicit kind +type ObjectName struct { + Namespace string + Name string +} + +// NewObjectName constructs a new one +func NewObjectName(namespace, name string) ObjectName { + return ObjectName{Namespace: namespace, Name: name} +} + +// Parts is the inverse of the constructor +func (objName ObjectName) Parts() (namespace, name string) { + return objName.Namespace, objName.Name +} + +// String returns the standard string encoding, +// which is designed to match the historical behavior of MetaNamespaceKeyFunc. +// Note this behavior is different from the String method of types.NamespacedName. 
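The new `object-names.go` file above (continued below) introduces a small structured key type. A minimal usage sketch, assuming only the functions defined in this file plus `k8s.io/apimachinery/pkg/types`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Namespaced objects encode as "namespace/name", matching MetaNamespaceKeyFunc.
	n := cache.NewObjectName("kube-system", "coredns")
	fmt.Println(n.String()) // kube-system/coredns

	// Cluster-scoped objects (empty namespace) encode as just the name.
	fmt.Println(cache.NewObjectName("", "node-1").String()) // node-1

	// ParseObjectName inverts the string encoding.
	parsed, err := cache.ParseObjectName("kube-system/coredns")
	fmt.Println(parsed == n, err) // true <nil>

	// Round-trip through types.NamespacedName.
	nn := types.NamespacedName{Namespace: "kube-system", Name: "coredns"}
	fmt.Println(cache.NamespacedNameAsObjectName(nn).AsNamespacedName() == nn) // true
}
```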
+func (objName ObjectName) String() string { + if len(objName.Namespace) > 0 { + return objName.Namespace + "/" + objName.Name + } + return objName.Name +} + +// ParseObjectName tries to parse the standard encoding +func ParseObjectName(str string) (ObjectName, error) { + var objName ObjectName + var err error + objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str) + return objName, err +} + +// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName +func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName { + return NewObjectName(nn.Namespace, nn.Name) +} + +// AsNamespacedName rebrands as a NamespacedName +func (objName ObjectName) AsNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name} +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go index 2b335c104c87..45eaff52853f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "math/rand" + "os" "reflect" "strings" "sync" @@ -69,9 +70,7 @@ type Reflector struct { listerWatcher ListerWatcher // backoff manages backoff of ListWatch backoffManager wait.BackoffManager - // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. - initConnBackoffManager wait.BackoffManager - resyncPeriod time.Duration + resyncPeriod time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -220,11 +219,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. 
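The comment above pins down the reflector's backoff parameters. As a rough illustration of what `wait.NewExponentialBackoffManager` does with those arguments (a sketch, assuming `k8s.io/apimachinery/pkg/util/wait` and `k8s.io/utils/clock`; the actual waits are jittered, so the printed durations are approximate):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/clock"
)

func main() {
	// Same parameters the reflector uses: start at 800ms, double per step,
	// cap at 30s, and reset if no backoff was needed for 2 minutes.
	bm := wait.NewExponentialBackoffManager(
		800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, clock.RealClock{})
	for attempt := 0; attempt < 4; attempt++ {
		start := time.Now()
		<-bm.Backoff().C() // blocks for the current (jittered) interval
		fmt.Printf("attempt %d waited ~%v\n", attempt, time.Since(start).Round(100*time.Millisecond))
	}
}
```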
- backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), - clock: reflectorClock, - watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), - expectedType: reflect.TypeOf(expectedType), + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), } if r.name == "" { @@ -239,6 +237,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S r.expectedGVK = getExpectedGVKFromObject(expectedType) } + if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { + r.UseWatchList = true + } + return r } @@ -420,7 +422,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc select { case <-stopCh: return nil - case <-r.initConnBackoffManager.Backoff().C(): + case <-r.backoffManager.Backoff().C(): continue } } @@ -446,7 +448,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc select { case <-stopCh: return nil - case <-r.initConnBackoffManager.Backoff().C(): + case <-r.backoffManager.Backoff().C(): continue } case apierrors.IsInternalError(err) && retry.ShouldRetry(): @@ -508,7 +510,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { pager.PageSize = 0 } - list, paginatedResult, err = pager.List(context.Background(), options) + list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options) if isExpiredError(err) || isTooLargeResourceVersionError(err) { r.setIsLastSyncResourceVersionUnavailable(true) // Retry immediately if the resource version used to list is unavailable. @@ -517,7 +519,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { // resource version it is listing at is expired or the cache may not yet be synced to the provided // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure // the reflector makes forward progress. 
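The hunk above switches the reflector's list path from `pager.List` to the new `pager.ListWithAlloc` (documented in the pager diff further down). A minimal sketch of calling it directly, assuming a typed clientset; the `Limit` value is arbitrary:

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

func listAllPods(ctx context.Context, cs kubernetes.Interface) error {
	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return cs.CoreV1().Pods("").List(ctx, opts)
	})
	// ListWithAlloc shallow-copies items out of each server page, so a caller
	// that retains only a few items does not pin whole page slices in memory.
	list, paginated, err := p.ListWithAlloc(ctx, metav1.ListOptions{Limit: 500})
	if err != nil {
		return err
	}
	fmt.Println("fetched via pagination:", paginated, list != nil)
	return nil
}
```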
- list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) + list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) } close(listCh) }() @@ -555,7 +557,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { } resourceVersion = listMetaInterface.GetResourceVersion() initTrace.Step("Resource version extracted") - items, err := meta.ExtractList(list) + items, err := meta.ExtractListWithAlloc(list) if err != nil { return fmt.Errorf("unable to understand list result %#v (%v)", list, err) } @@ -599,7 +601,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { isErrorRetriableWithSideEffectsFn := func(err error) bool { if canRetry := isWatchErrorRetriable(err); canRetry { klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) - <-r.initConnBackoffManager.Backoff().C() + <-r.backoffManager.Backoff().C() return true } if isExpiredError(err) || isTooLargeResourceVersionError(err) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go index a889fdbc36bd..be8694ddb620 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -459,29 +459,30 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") return } - fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: s.indexer, - EmitDeltaTypeReplaced: true, - Transformer: s.transform, - }) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - ObjectDescription: s.objectDescription, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, - - Process: s.HandleDeltas, - WatchErrorHandler: s.watchErrorHandler, - } func() { s.startedLock.Lock() defer s.startedLock.Unlock() + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + EmitDeltaTypeReplaced: true, + Transformer: s.transform, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + WatchErrorHandler: s.watchErrorHandler, + } + s.controller = New(cfg) s.controller.(*controller).clock = s.clock s.started = true diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go index 5308ea748001..5cc3f42ec17e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Store is a generic object storage and processing interface. A @@ -99,20 +100,38 @@ type ExplicitKey string // The key uses the format / unless is empty, then // it's just . // -// TODO: replace key-as-string with a key-as-struct so that this -// packing/unpacking won't be necessary. +// Clients that want a structured alternative can use ObjectToName or MetaObjectToName. 
+// Note: this would not be a client that wants a key for a Store because those are +// necessarily strings. +// +// TODO maybe some day?: change Store to be keyed differently func MetaNamespaceKeyFunc(obj interface{}) (string, error) { if key, ok := obj.(ExplicitKey); ok { return string(key), nil } + objName, err := ObjectToName(obj) + if err != nil { + return "", err + } + return objName.String(), nil +} + +// ObjectToName returns the structured name for the given object, +// if indeed it can be viewed as a metav1.Object. +func ObjectToName(obj interface{}) (ObjectName, error) { meta, err := meta.Accessor(obj) if err != nil { - return "", fmt.Errorf("object has no meta: %v", err) + return ObjectName{}, fmt.Errorf("object has no meta: %v", err) } - if len(meta.GetNamespace()) > 0 { - return meta.GetNamespace() + "/" + meta.GetName(), nil + return MetaObjectToName(meta), nil +} + +// MetaObjectToName returns the structured name for the given object +func MetaObjectToName(obj metav1.Object) ObjectName { + if len(obj.GetNamespace()) > 0 { + return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()} } - return meta.GetName(), nil + return ObjectName{Namespace: "", Name: obj.GetName()} } // SplitMetaNamespaceKey returns the namespace and name that diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 71fb821b1e8f..ae8b8c703863 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -67,7 +67,7 @@ type Preferences struct { type Cluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used. @@ -107,7 +107,7 @@ type Cluster struct { type AuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // ClientCertificate is the path to a client cert file for TLS. // +optional ClientCertificate string `json:"client-certificate,omitempty"` @@ -159,7 +159,7 @@ type AuthInfo struct { type Context struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` // AuthInfo is the name of the authInfo for this context @@ -252,7 +252,7 @@ type ExecConfig struct { // recommended as one of the prime benefits of exec plugins is that no secrets need // to be stored directly in the kubeconfig. // +k8s:conversion-gen=false - Config runtime.Object + Config runtime.Object `json:"-"` // InteractiveMode determines this plugin's relationship with standard input. 
Valid // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this @@ -264,7 +264,7 @@ type ExecConfig struct { // client.authentication.k8s.io/v1beta1, then this field is optional and defaults // to "IfAvailable" when unset. Otherwise, this field is required. // +optional - InteractiveMode ExecInteractiveMode + InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` // StdinUnavailable indicates whether the exec authenticator can pass standard // input through to this exec plugin. For example, a higher level entity might be using @@ -272,14 +272,14 @@ type ExecConfig struct { // plugin to use standard input. This is kept here in order to keep all of the exec configuration // together, but it is never serialized. // +k8s:conversion-gen=false - StdinUnavailable bool + StdinUnavailable bool `json:"-"` // StdinUnavailableMessage is an optional message to be displayed when the exec authenticator // cannot successfully run this exec plugin because it needs to use standard input and // StdinUnavailable is true. For example, a process that is already using standard input to // read user instructions might set this to "used by my-program to read user instructions". // +k8s:conversion-gen=false - StdinUnavailableMessage string + StdinUnavailableMessage string `json:"-"` } var _ fmt.Stringer = new(ExecConfig) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 44de1d41d836..b75737f1c904 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -128,6 +128,28 @@ type ClientConfigLoadingRules struct { // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. // In case of missing files, it warns the user about the missing files. WarnIfAllMissing bool + + // Warner is the warning log callback to use in case of missing files. + Warner WarningHandler +} + +// WarningHandler allows to set the logging function to use +type WarningHandler func(error) + +func (handler WarningHandler) Warn(err error) { + if handler == nil { + klog.V(1).Info(err) + } else { + handler(err) + } +} + +type MissingConfigError struct { + Missing []string +} + +func (c MissingConfigError) Error() string { + return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", ")) } // ClientConfigLoadingRules implements the ClientConfigLoader interface. 
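The new `Warner` hook above lets callers intercept the missing-kubeconfig warning instead of having it logged through klog. A minimal sketch, assuming the default loading rules; the handler body is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	// Route missing-file warnings to our own handler; a nil Warner keeps
	// the klog default.
	rules.Warner = func(err error) {
		var missing clientcmd.MissingConfigError
		if errors.As(err, &missing) {
			fmt.Println("no kubeconfig found at:", missing.Missing)
			return
		}
		fmt.Println("kubeconfig warning:", err)
	}
	if _, err := rules.Load(); err != nil {
		fmt.Println("load failed:", err)
	}
}
```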
@@ -219,7 +241,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { - klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) + rules.Warner.Warn(MissingConfigError{Missing: missingList}) } // first merge all of our maps diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 940e716175a0..c1151baf2073 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -99,6 +99,11 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") } + id := lec.Lock.Identity() + if id == "" { + return nil, fmt.Errorf("Lock identity is empty") + } + le := LeaderElector{ config: lec, clock: clock.RealClock{}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go deleted file mode 100644 index e811fff03c56..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcelock - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" -) - -// TODO: This is almost a exact replica of Endpoints lock. -// going forwards as we self host more and more components -// and use ConfigMaps as the means to pass that configuration -// data we will likely move to deprecate the Endpoints lock. - -type configMapLock struct { - // ConfigMapMeta should contain a Name and a Namespace of a - // ConfigMapMeta object that the LeaderElector will attempt to lead. 
- ConfigMapMeta metav1.ObjectMeta - Client corev1client.ConfigMapsGetter - LockConfig ResourceLockConfig - cm *v1.ConfigMap -} - -// Get returns the election record from a ConfigMap Annotation -func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) { - var record LeaderElectionRecord - cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{}) - if err != nil { - return nil, nil, err - } - cml.cm = cm - if cml.cm.Annotations == nil { - cml.cm.Annotations = make(map[string]string) - } - recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey] - recordBytes := []byte(recordStr) - if found { - if err := json.Unmarshal(recordBytes, &record); err != nil { - return nil, nil, err - } - } - return &record, recordBytes, nil -} - -// Create attempts to create a LeaderElectionRecord annotation -func (cml *configMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error { - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(ctx, &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cml.ConfigMapMeta.Name, - Namespace: cml.ConfigMapMeta.Namespace, - Annotations: map[string]string{ - LeaderElectionRecordAnnotationKey: string(recordBytes), - }, - }, - }, metav1.CreateOptions{}) - return err -} - -// Update will update an existing annotation on a given resource. -func (cml *configMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error { - if cml.cm == nil { - return errors.New("configmap not initialized, call get or create first") - } - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - if cml.cm.Annotations == nil { - cml.cm.Annotations = make(map[string]string) - } - cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{}) - if err != nil { - return err - } - cml.cm = cm - return nil -} - -// RecordEvent in leader election while adding meta-data -func (cml *configMapLock) RecordEvent(s string) { - if cml.LockConfig.EventRecorder == nil { - return - } - events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s) - subject := &v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta} - // Populate the type meta, so we don't have to get it from the schema - subject.Kind = "ConfigMap" - subject.APIVersion = v1.SchemeGroupVersion.String() - cml.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events) -} - -// Describe is used to convert details on current resource lock -// into a string -func (cml *configMapLock) Describe() string { - return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name) -} - -// Identity returns the Identity of the lock -func (cml *configMapLock) Identity() string { - return cml.LockConfig.Identity -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go deleted file mode 100644 index eb36d2210a3f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcelock - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" -) - -type endpointsLock struct { - // EndpointsMeta should contain a Name and a Namespace of an - // Endpoints object that the LeaderElector will attempt to lead. - EndpointsMeta metav1.ObjectMeta - Client corev1client.EndpointsGetter - LockConfig ResourceLockConfig - e *v1.Endpoints -} - -// Get returns the election record from a Endpoints Annotation -func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) { - var record LeaderElectionRecord - ep, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{}) - if err != nil { - return nil, nil, err - } - el.e = ep - if el.e.Annotations == nil { - el.e.Annotations = make(map[string]string) - } - recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey] - recordBytes := []byte(recordStr) - if found { - if err := json.Unmarshal(recordBytes, &record); err != nil { - return nil, nil, err - } - } - return &record, recordBytes, nil -} - -// Create attempts to create a LeaderElectionRecord annotation -func (el *endpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error { - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(ctx, &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: el.EndpointsMeta.Name, - Namespace: el.EndpointsMeta.Namespace, - Annotations: map[string]string{ - LeaderElectionRecordAnnotationKey: string(recordBytes), - }, - }, - }, metav1.CreateOptions{}) - return err -} - -// Update will update and existing annotation on a given resource. 
-func (el *endpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error { - if el.e == nil { - return errors.New("endpoint not initialized, call get or create first") - } - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - if el.e.Annotations == nil { - el.e.Annotations = make(map[string]string) - } - el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{}) - if err != nil { - return err - } - el.e = e - return nil -} - -// RecordEvent in leader election while adding meta-data -func (el *endpointsLock) RecordEvent(s string) { - if el.LockConfig.EventRecorder == nil { - return - } - events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s) - subject := &v1.Endpoints{ObjectMeta: el.e.ObjectMeta} - // Populate the type meta, so we don't have to get it from the schema - subject.Kind = "Endpoints" - subject.APIVersion = v1.SchemeGroupVersion.String() - el.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events) -} - -// Describe is used to convert details on current resource lock -// into a string -func (el *endpointsLock) Describe() string { - return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name) -} - -// Identity returns the Identity of the lock -func (el *endpointsLock) Identity() string { - return el.LockConfig.Identity -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 05b5b202379b..483753d632ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -34,7 +34,7 @@ const ( endpointsResourceLock = "endpoints" configMapsResourceLock = "configmaps" LeasesResourceLock = "leases" - // When using EndpointsLeasesResourceLock, you need to ensure that + // When using endpointsLeasesResourceLock, you need to ensure that // API Priority & Fairness is configured with non-default flow-schema // that will catch the necessary operations on leader-election related // endpoint objects. @@ -67,8 +67,8 @@ const ( // serviceAccount: // name: '*' // namespace: kube-system - EndpointsLeasesResourceLock = "endpointsleases" - // When using ConfigMapsLeasesResourceLock, you need to ensure that + endpointsLeasesResourceLock = "endpointsleases" + // When using configMapsLeasesResourceLock, you need to ensure that // API Priority & Fairness is configured with non-default flow-schema // that will catch the necessary operations on leader-election related // configmap objects. @@ -101,7 +101,7 @@ const ( // serviceAccount: // name: '*' // namespace: kube-system - ConfigMapsLeasesResourceLock = "configmapsleases" + configMapsLeasesResourceLock = "configmapsleases" ) // LeaderElectionRecord is the record that is stored in the leader election annotation. 
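With the endpoints and configmaps lock implementations deleted above, and the multilock variants removed in the `New` hunk that follows, only the lease lock remains constructible. A minimal migration sketch; the namespace, name, and `id` values are placeholders:

```go
package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newLeaseLock(cfg *rest.Config, id string) (resourcelock.Interface, error) {
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// Identity must be non-empty: NewLeaderElector now rejects a lock
	// whose Identity() returns "".
	return resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system", "my-controller",
		client.CoreV1(), client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
}
```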
@@ -164,22 +164,6 @@ type Interface interface { // Manufacture will create a lock of a given type according to the input parameters func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) { - endpointsLock := &endpointsLock{ - EndpointsMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: coreClient, - LockConfig: rlc, - } - configmapLock := &configMapLock{ - ConfigMapMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: coreClient, - LockConfig: rlc, - } leaseLock := &LeaseLock{ LeaseMeta: metav1.ObjectMeta{ Namespace: ns, @@ -190,21 +174,15 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf } switch lockType { case endpointsResourceLock: - return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", EndpointsLeasesResourceLock) + return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock) case configMapsResourceLock: - return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", ConfigMapsLeasesResourceLock) + return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock) case LeasesResourceLock: return leaseLock, nil - case EndpointsLeasesResourceLock: - return &MultiLock{ - Primary: endpointsLock, - Secondary: leaseLock, - }, nil - case ConfigMapsLeasesResourceLock: - return &MultiLock{ - Primary: configmapLock, - Secondary: leaseLock, - }, nil + case endpointsLeasesResourceLock: + return nil, fmt.Errorf("endpointsleases lock is removed, migrate to %s", LeasesResourceLock) + case configMapsLeasesResourceLock: + return nil, fmt.Errorf("configmapsleases lock is removed, migrated to %s", LeasesResourceLock) default: return nil, fmt.Errorf("Invalid lock-type %s", lockType) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/metrics.go index f36430dc3edc..601c07335f14 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -64,6 +64,17 @@ type RetryMetric interface { IncrementRetry(ctx context.Context, code string, method string, host string) } +// TransportCacheMetric shows the number of entries in the internal transport cache +type TransportCacheMetric interface { + Observe(value int) +} + +// TransportCreateCallsMetric counts the number of times a transport is created +// partitioned by the result of the cache: hit, miss, uncacheable +type TransportCreateCallsMetric interface { + Increment(result string) +} + var ( // ClientCertExpiry is the expiry time of a client certificate ClientCertExpiry ExpiryMetric = noopExpiry{} @@ -85,6 +96,12 @@ var ( // RequestRetry is the retry metric that tracks the number of // retries sent to the server. RequestRetry RetryMetric = noopRetry{} + // TransportCacheEntries is the metric that tracks the number of entries in the + // internal transport cache. + TransportCacheEntries TransportCacheMetric = noopTransportCache{} + // TransportCreateCalls is the metric that counts the number of times a new transport + // is created + TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{} ) // RegisterOpts contains all the metrics to register. Metrics may be nil. 
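The two metric interfaces introduced above can be satisfied by any sink. A minimal sketch with print-based adapters (hypothetical types; a real deployment would register Prometheus-backed adapters like the ones added to component-base later in this patch):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/metrics"
)

// Print-based stand-ins for a real metrics backend.
type cacheGauge struct{}

func (cacheGauge) Observe(v int) { fmt.Println("transport cache entries:", v) }

type createCounter struct{}

func (createCounter) Increment(result string) { fmt.Println("transport create:", result) }

func main() {
	// Unset fields keep their no-op defaults; Register is guarded so only
	// the first call in a process takes effect.
	metrics.Register(metrics.RegisterOpts{
		TransportCacheEntries: cacheGauge{},
		TransportCreateCalls:  createCounter{},
	})
}
```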
@@ -98,6 +115,8 @@ type RegisterOpts struct { RequestResult ResultMetric ExecPluginCalls CallsMetric RequestRetry RetryMetric + TransportCacheEntries TransportCacheMetric + TransportCreateCalls TransportCreateCallsMetric } // Register registers metrics for the rest client to use. This can @@ -131,6 +150,12 @@ func Register(opts RegisterOpts) { if opts.RequestRetry != nil { RequestRetry = opts.RequestRetry } + if opts.TransportCacheEntries != nil { + TransportCacheEntries = opts.TransportCacheEntries + } + if opts.TransportCreateCalls != nil { + TransportCreateCalls = opts.TransportCreateCalls + } }) } @@ -161,3 +186,11 @@ func (noopCalls) Increment(int, string) {} type noopRetry struct{} func (noopRetry) IncrementRetry(context.Context, string, string, string) {} + +type noopTransportCache struct{} + +func (noopTransportCache) Observe(int) {} + +type noopTransportCreateCalls struct{} + +func (noopTransportCreateCalls) Increment(string) {} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go index 9ba988f6856c..3c77cc37fa5a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/pager/pager.go @@ -73,7 +73,23 @@ func New(fn ListPageFunc) *ListPager { // List returns a single list object, but attempts to retrieve smaller chunks from the // server to reduce the impact on the server. If the chunk attempt fails, it will load // the full list instead. The Limit field on options, if unset, will default to the page size. +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use ListWithAlloc instead. func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, false) +} + +// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency. +func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, true) +} + +func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) { if options.Limit == 0 { options.Limit = p.PageSize } @@ -123,7 +139,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti list.ResourceVersion = m.GetResourceVersion() list.SelfLink = m.GetSelfLink() } - if err := meta.EachListItem(obj, func(obj runtime.Object) error { + eachListItemFunc := meta.EachListItem + if allocNew { + eachListItemFunc = meta.EachListItemWithAlloc + } + if err := eachListItemFunc(obj, func(obj runtime.Object) error { list.Items = append(list.Items, obj) return nil }); err != nil { @@ -156,12 +176,26 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // // Items are retrieved in chunks from the server to reduce the impact on the server with up to // ListPager.PageBufferSize chunks buffered concurrently in the background. 
+// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use EachListItemWithAlloc instead. func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { return meta.EachListItem(obj, fn) }) } +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItemWithAlloc(obj, fn) + }) +} + // eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on // each list chunk. If fn returns an error, processing stops and that error is returned. If fn does // not return an error, any error encountered while retrieving the list from the server is diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go index 4899b362dff3..1582e8ee7de7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go @@ -357,6 +357,9 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m event := recorder.makeEvent(ref, annotations, eventtype, reason, message) event.Source = recorder.source + event.ReportingInstance = recorder.source.Host + event.ReportingController = recorder.source.Component + // NOTE: events should be a non-blocking operation, but we also need to not // put this in a goroutine, otherwise we'll race to write to a closed channel // when we go to shut down this broadcaster. 
Just drop events if we get overloaded, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/retrywatcher.go index e4806d2ea12b..d81dc43570dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/retrywatcher.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/retrywatcher.go @@ -24,10 +24,9 @@ import ( "net/http" "time" - "github.com/davecgh/go-spew/spew" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/dump" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -191,7 +190,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) { errObject := apierrors.FromObject(event.Object) statusErr, ok := errObject.(*apierrors.StatusError) if !ok { - klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object))) // Retry unknown errors return false, 0 } @@ -220,7 +219,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) { // Log here so we have a record of hitting the unexpected error // and we can whitelist some error codes if we missed any that are expected. - klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object)) + klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object))) // Retry return false, statusDelay diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/transport/cache.go b/cluster-autoscaler/vendor/k8s.io/client-go/transport/cache.go index edcc6d1d4811..7c7f1b330f81 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/transport/cache.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/transport/cache.go @@ -27,6 +27,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/metrics" ) // TlsTransportCache caches TLS http.RoundTrippers different configurations. The @@ -80,11 +81,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { // Ensure we only create a single transport for the given TLS options c.mu.Lock() defer c.mu.Unlock() + defer metrics.TransportCacheEntries.Observe(len(c.transports)) // See if we already have a custom transport for this config if t, ok := c.transports[key]; ok { + metrics.TransportCreateCalls.Increment("hit") return t, nil } + metrics.TransportCreateCalls.Increment("miss") + } else { + metrics.TransportCreateCalls.Increment("uncacheable") } // Get the TLS options for this client config diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go index 4be1dfe49350..37b023ef25d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go @@ -25,6 +25,7 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" + "math" "math/big" "net" "os" @@ -57,8 +58,14 @@ type AltNames struct { // NewSelfSignedCACert creates a CA certificate func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) tmpl := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), + SerialNumber: serial, Subject: pkix.Name{ CommonName: cfg.CommonName, Organization: cfg.Organization, @@ -116,9 +123,14 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err != nil { return nil, nil, err } - + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) caTemplate := x509.Certificate{ - SerialNumber: big.NewInt(1), + SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()), }, @@ -144,9 +156,14 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err != nil { return nil, nil, err } - + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). + serial, err = cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) template := x509.Certificate{ - SerialNumber: big.NewInt(2), + SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), }, diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/api/retry_error.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/api/retry_error.go new file mode 100644 index 000000000000..ac0e5e6e728f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/api/retry_error.go @@ -0,0 +1,46 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "time" +) + +// RetryError indicates that a service reconciliation should be retried after a +// fixed duration (as opposed to backing off exponentially). +type RetryError struct { + msg string + retryAfter time.Duration +} + +// NewRetryError returns a RetryError. +func NewRetryError(msg string, retryAfter time.Duration) *RetryError { + return &RetryError{ + msg: msg, + retryAfter: retryAfter, + } +} + +// Error shows the details of the retry reason. +func (re *RetryError) Error() string { + return re.msg +} + +// RetryAfter returns the defined retry-after duration. 
+func (re *RetryError) RetryAfter() time.Duration { + return re.retryAfter +} diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go index 7e7bf9dfab89..c9a04085f480 100644 --- a/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go @@ -131,11 +131,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. // irrespective of the ImplementedElsewhere error. Additional finalizers for // LB services must be managed in the alternate implementation. type LoadBalancer interface { - // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. // Implementations must treat the *v1.Service parameter as read-only and not modify it. - // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager + // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager. + // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) // GetLoadBalancerName returns the name of the load balancer. Implementations must treat the // *v1.Service parameter as read-only and not modify it. @@ -143,7 +143,13 @@ type LoadBalancer interface { // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer // Implementations must treat the *v1.Service and *v1.Node // parameters as read-only and not modify them. - // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager + // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager. + // + // Implementations may return a (possibly wrapped) api.RetryError to enforce + // backing off at a fixed duration. This can be used for cases like when the + // load balancer is not ready yet (e.g., it is still being provisioned) and + // polling at a fixed rate is preferred over backing off exponentially in + // order to minimize latency. EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) // UpdateLoadBalancer updates hosts under the specified load balancer. // Implementations must treat the *v1.Service and *v1.Node diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go index f8fc1f2cae1a..6dc23ec18264 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/logs/api/v1/registry.go @@ -20,6 +20,7 @@ import ( "fmt" "sort" "strings" + "sync" "github.com/go-logr/logr" @@ -30,6 +31,7 @@ var logRegistry = newLogFormatRegistry() // logFormatRegistry stores factories for all supported logging formats. type logFormatRegistry struct { + mutex sync.Mutex registry map[string]logFormat frozen bool } @@ -83,6 +85,8 @@ func newLogFormatRegistry() *logFormatRegistry { // register adds a new log format. It's an error to modify an existing one. 
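The `LoadBalancer` doc update above states that implementations may return a (possibly wrapped) `api.RetryError` to requeue at a fixed interval. A minimal sketch of an `EnsureLoadBalancer` using it; the `provider` type and `lbReady` helper are hypothetical stand-ins:

```go
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/cloud-provider/api"
)

type provider struct{}

// lbReady is a placeholder for a real readiness probe against the cloud API.
func (p *provider) lbReady(ctx context.Context, svc *v1.Service) (bool, *v1.LoadBalancerStatus, error) {
	return false, nil, nil
}

func (p *provider) EnsureLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	ready, status, err := p.lbReady(ctx, svc)
	if err != nil {
		return nil, err
	}
	if !ready {
		// Ask the service controller to retry on a fixed 10s cadence
		// instead of backing off exponentially.
		return nil, api.NewRetryError("load balancer still provisioning", 10*time.Second)
	}
	return status, nil
}
```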
func (lfr *logFormatRegistry) register(name string, format logFormat) error { + lfr.mutex.Lock() + defer lfr.mutex.Unlock() if lfr.frozen { return fmt.Errorf("log format registry is frozen, unable to register log format %s", name) } @@ -98,6 +102,8 @@ func (lfr *logFormatRegistry) register(name string, format logFormat) error { // get specified log format factory func (lfr *logFormatRegistry) get(name string) (*logFormat, error) { + lfr.mutex.Lock() + defer lfr.mutex.Unlock() format, ok := lfr.registry[name] if !ok { return nil, fmt.Errorf("log format: %s does not exists", name) @@ -107,6 +113,8 @@ func (lfr *logFormatRegistry) get(name string) (*logFormat, error) { // list names of registered log formats, including feature gates (sorted) func (lfr *logFormatRegistry) list() string { + lfr.mutex.Lock() + defer lfr.mutex.Unlock() formats := make([]string, 0, len(lfr.registry)) for name, format := range lfr.registry { item := fmt.Sprintf(`"%s"`, name) @@ -121,5 +129,7 @@ func (lfr *logFormatRegistry) list() string { // freeze prevents further modifications of the registered log formats. func (lfr *logFormatRegistry) freeze() { + lfr.mutex.Lock() + defer lfr.mutex.Unlock() lfr.frozen = true } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go index aa7cabea2b3e..9a527d85e114 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go @@ -152,6 +152,24 @@ var ( }, []string{"code", "call_status"}, ) + + transportCacheEntries = k8smetrics.NewGauge( + &k8smetrics.GaugeOpts{ + Name: "rest_client_transport_cache_entries", + StabilityLevel: k8smetrics.ALPHA, + Help: "Number of transport entries in the internal cache.", + }, + ) + + transportCacheCalls = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ + Name: "rest_client_transport_create_calls_total", + StabilityLevel: k8smetrics.ALPHA, + Help: "Number of calls to get a new transport, partitioned by the result of the operation " + + "hit: obtained from the cache, miss: created and added to the cache, uncacheable: created and not cached", + }, + []string{"result"}, + ) ) func init() { @@ -164,6 +182,9 @@ func init() { legacyregistry.MustRegister(requestRetry) legacyregistry.RawMustRegister(execPluginCertTTL) legacyregistry.MustRegister(execPluginCertRotation) + legacyregistry.MustRegister(execPluginCalls) + legacyregistry.MustRegister(transportCacheEntries) + legacyregistry.MustRegister(transportCacheCalls) metrics.Register(metrics.RegisterOpts{ ClientCertExpiry: execPluginCertTTLAdapter, ClientCertRotationAge: &rotationAdapter{m: execPluginCertRotation}, @@ -174,6 +195,8 @@ func init() { RequestResult: &resultAdapter{requestResult}, RequestRetry: &retryAdapter{requestRetry}, ExecPluginCalls: &callsAdapter{m: execPluginCalls}, + TransportCacheEntries: &transportCacheAdapter{m: transportCacheEntries}, + TransportCreateCalls: &transportCacheCallsAdapter{m: transportCacheCalls}, }) } @@ -232,3 +255,19 @@ type retryAdapter struct { func (r *retryAdapter) IncrementRetry(ctx context.Context, code, method, host string) { r.m.WithContext(ctx).WithLabelValues(code, method, host).Inc() } + +type transportCacheAdapter struct { + m *k8smetrics.Gauge +} + +func (t *transportCacheAdapter) Observe(value int) { + t.m.Set(float64(value)) +} + +type transportCacheCallsAdapter struct { + m 
*k8smetrics.CounterVec +} + +func (t *transportCacheCallsAdapter) Increment(result string) { + t.m.WithLabelValues(result).Inc() +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/testutil/testutil.go b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/testutil/testutil.go index 8587c752242a..26d2d5fd7154 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/metrics/testutil/testutil.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/metrics/testutil/testutil.go @@ -19,11 +19,13 @@ package testutil import ( "fmt" "io" + "testing" "github.com/prometheus/client_golang/prometheus/testutil" apimachineryversion "k8s.io/apimachinery/pkg/version" "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) // CollectAndCompare registers the provided Collector with a newly created @@ -91,3 +93,62 @@ func NewFakeKubeRegistry(ver string) metrics.KubeRegistry { return metrics.NewKubeRegistry() } + +func AssertVectorCount(t *testing.T, name string, labelFilter map[string]string, wantCount int) { + metrics, err := legacyregistry.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Failed to gather metrics: %s", err) + } + + counterSum := 0 + for _, mf := range metrics { + if mf.GetName() != name { + continue // Ignore other metrics. + } + for _, metric := range mf.GetMetric() { + if !LabelsMatch(metric, labelFilter) { + continue + } + counterSum += int(metric.GetCounter().GetValue()) + } + } + if wantCount != counterSum { + t.Errorf("Wanted count %d, got %d for metric %s with labels %#+v", wantCount, counterSum, name, labelFilter) + for _, mf := range metrics { + if mf.GetName() == name { + for _, metric := range mf.GetMetric() { + t.Logf("\tnear match: %s", metric.String()) + } + } + } + } +} + +func AssertHistogramTotalCount(t *testing.T, name string, labelFilter map[string]string, wantCount int) { + metrics, err := legacyregistry.DefaultGatherer.Gather() + if err != nil { + t.Fatalf("Failed to gather metrics: %s", err) + } + counterSum := 0 + for _, mf := range metrics { + if mf.GetName() != name { + continue // Ignore other metrics. + } + for _, metric := range mf.GetMetric() { + if !LabelsMatch(metric, labelFilter) { + continue + } + counterSum += int(metric.GetHistogram().GetSampleCount()) + } + } + if wantCount != counterSum { + t.Errorf("Wanted count %d, got %d for metric %s with labels %#+v", wantCount, counterSum, name, labelFilter) + for _, mf := range metrics { + if mf.GetName() == name { + for _, metric := range mf.GetMetric() { + t.Logf("\tnear match: %s\n", metric.String()) + } + } + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/version/dynamic.go b/cluster-autoscaler/vendor/k8s.io/component-base/version/dynamic.go new file mode 100644 index 000000000000..46ade9f5ec13 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/dynamic.go @@ -0,0 +1,77 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package version
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	utilversion "k8s.io/apimachinery/pkg/util/version"
+)
+
+var dynamicGitVersion atomic.Value
+
+func init() {
+	// initialize to static gitVersion
+	dynamicGitVersion.Store(gitVersion)
+}
+
+// SetDynamicVersion overrides the version returned as the GitVersion from Get().
+// The specified version must be non-empty, a valid semantic version, and must
+// match the major/minor/patch version of the default gitVersion.
+func SetDynamicVersion(dynamicVersion string) error {
+	if err := ValidateDynamicVersion(dynamicVersion); err != nil {
+		return err
+	}
+	dynamicGitVersion.Store(dynamicVersion)
+	return nil
+}
+
+// ValidateDynamicVersion ensures the given version is non-empty, a valid semantic version,
+// and matches the major/minor/patch version of the default gitVersion.
+func ValidateDynamicVersion(dynamicVersion string) error {
+	return validateDynamicVersion(dynamicVersion, gitVersion)
+}
+
+func validateDynamicVersion(dynamicVersion, defaultVersion string) error {
+	if len(dynamicVersion) == 0 {
+		return fmt.Errorf("version must not be empty")
+	}
+	if dynamicVersion == defaultVersion {
+		// allow no-op
+		return nil
+	}
+	vRuntime, err := utilversion.ParseSemantic(dynamicVersion)
+	if err != nil {
+		return err
+	}
+	// must match major/minor/patch of default version
+	var vDefault *utilversion.Version
+	if defaultVersion == "v0.0.0-master+$Format:%H$" {
+		// special-case the placeholder value which doesn't parse as a semantic version
+		vDefault, err = utilversion.ParseSemantic("v0.0.0-master")
+	} else {
+		vDefault, err = utilversion.ParseSemantic(defaultVersion)
+	}
+	if err != nil {
+		return err
+	}
+	if vRuntime.Major() != vDefault.Major() || vRuntime.Minor() != vDefault.Minor() || vRuntime.Patch() != vDefault.Patch() {
+		return fmt.Errorf("version %q must match major/minor/patch of default version %q", dynamicVersion, defaultVersion)
+	}
+	return nil
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go b/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go
index 106e3450ff0a..46edab46f331 100644
--- a/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go
+++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/verflag/verflag.go
@@ -20,20 +20,22 @@ package verflag
 
 import (
 	"fmt"
+	"io"
 	"os"
 	"strconv"
+	"strings"
 
 	flag "github.com/spf13/pflag"
 
 	"k8s.io/component-base/version"
 )
 
-type versionValue int
+type versionValue string
 
 const (
-	VersionFalse versionValue = 0
-	VersionTrue  versionValue = 1
-	VersionRaw   versionValue = 2
+	VersionFalse versionValue = "false"
+	VersionTrue  versionValue = "true"
+	VersionRaw   versionValue = "raw"
 )
 
 const strRawVersion string = "raw"
@@ -51,20 +53,28 @@ func (v *versionValue) Set(s string) error {
 		*v = VersionRaw
 		return nil
 	}
+
+	if strings.HasPrefix(s, "v") {
+		err := version.SetDynamicVersion(s)
+		if err == nil {
+			*v = versionValue(s)
+		}
+		return err
+	}
+
 	boolVal, err := strconv.ParseBool(s)
-	if boolVal {
-		*v = VersionTrue
-	} else {
-		*v = VersionFalse
+	if err == nil {
+		if boolVal {
+			*v = VersionTrue
+		} else {
+			*v = VersionFalse
+		}
 	}
 	return err
 }
 
 func (v *versionValue) String() string {
-	if *v == VersionRaw {
-		return strRawVersion
-	}
-	return fmt.Sprintf("%v", bool(*v == VersionTrue))
+	return string(*v)
 }
 
 // The type of the flag as required by the pflag.Value interface
@@ -88,7 +98,7 @@ func Version(name string, value versionValue, usage string) *versionValue {
 const
versionFlagName = "version" var ( - versionFlag = Version(versionFlagName, VersionFalse, "Print version information and quit") + versionFlag = Version(versionFlagName, VersionFalse, "--version, --version=raw prints version information and quits; --version=vX.Y.Z... sets the reported version") programName = "Kubernetes" ) @@ -98,14 +108,20 @@ func AddFlags(fs *flag.FlagSet) { fs.AddFlag(flag.Lookup(versionFlagName)) } -// PrintAndExitIfRequested will check if the -version flag was passed +// variables for unit testing PrintAndExitIfRequested +var ( + output = io.Writer(os.Stdout) + exit = os.Exit +) + +// PrintAndExitIfRequested will check if --version or --version=raw was passed // and, if so, print the version and exit. func PrintAndExitIfRequested() { if *versionFlag == VersionRaw { - fmt.Printf("%#v\n", version.Get()) - os.Exit(0) + fmt.Fprintf(output, "%#v\n", version.Get()) + exit(0) } else if *versionFlag == VersionTrue { - fmt.Printf("%s %s\n", programName, version.Get()) - os.Exit(0) + fmt.Fprintf(output, "%s %s\n", programName, version.Get()) + exit(0) } } diff --git a/cluster-autoscaler/vendor/k8s.io/component-base/version/version.go b/cluster-autoscaler/vendor/k8s.io/component-base/version/version.go index d1e76dc00e05..1d268d4c6805 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-base/version/version.go +++ b/cluster-autoscaler/vendor/k8s.io/component-base/version/version.go @@ -31,7 +31,7 @@ func Get() apimachineryversion.Info { return apimachineryversion.Info{ Major: gitMajor, Minor: gitMinor, - GitVersion: gitVersion, + GitVersion: dynamicGitVersion.Load().(string), GitCommit: gitCommit, GitTreeState: gitTreeState, BuildDate: buildDate, diff --git a/cluster-autoscaler/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go b/cluster-autoscaler/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go index 27caf69b920a..0e3b991636a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go +++ b/cluster-autoscaler/vendor/k8s.io/component-helpers/scheduling/corev1/nodeaffinity/nodeaffinity.go @@ -200,6 +200,15 @@ func (t *nodeSelectorTerm) match(nodeLabels labels.Set, nodeFields fields.Set) ( return true, nil } +var validSelectorOperators = []string{ + string(v1.NodeSelectorOpIn), + string(v1.NodeSelectorOpNotIn), + string(v1.NodeSelectorOpExists), + string(v1.NodeSelectorOpDoesNotExist), + string(v1.NodeSelectorOpGt), + string(v1.NodeSelectorOpLt), +} + // nodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements // labels.Selector. 
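// As an illustrative sketch (the operator value "Foo" and the field path are
// hypothetical, not part of this change), an unsupported operator now yields
// a field error that spells out the valid values, roughly:
//
//	nodeSelectorTerms[0].matchExpressions[0].operator: Unsupported value: "Foo": supported values: "In", "NotIn", "Exists", "DoesNotExist", "Gt", "Lt"
//
// whereas previously nil was passed to field.NotSupported and the list of
// supported values was omitted from the message.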
func nodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement, path *field.Path) (labels.Selector, []error) { @@ -225,7 +234,7 @@ func nodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement, path * case v1.NodeSelectorOpLt: op = selection.LessThan default: - errs = append(errs, field.NotSupported(p.Child("operator"), expr.Operator, nil)) + errs = append(errs, field.NotSupported(p.Child("operator"), expr.Operator, validSelectorOperators)) continue } r, err := labels.NewRequirement(expr.Key, op, expr.Values, field.WithPath(p)) diff --git a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go index 56bc7dbae7f6..95cdd5cfa3f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go @@ -77,7 +77,7 @@ func (Protocol) EnumDescriptor() ([]byte, []int) { type MountPropagation int32 const ( - // No mount propagation ("private" in Linux terminology). + // No mount propagation ("rprivate" in Linux terminology). MountPropagation_PROPAGATION_PRIVATE MountPropagation = 0 // Mounts get propagated from the host to the container ("rslave" in Linux). MountPropagation_PROPAGATION_HOST_TO_CONTAINER MountPropagation = 1 diff --git a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto index 3e20c42c6097..11d586363697 100644 --- a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto +++ b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto @@ -199,7 +199,7 @@ message PortMapping { } enum MountPropagation { - // No mount propagation ("private" in Linux terminology). + // No mount propagation ("rprivate" in Linux terminology). PROPAGATION_PRIVATE = 0; // Mounts get propagated from the host to the container ("rslave" in Linux). PROPAGATION_HOST_TO_CONTAINER = 1; diff --git a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/errors/errors.go b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/errors/errors.go index 41d7b92466d8..a4538669122f 100644 --- a/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/errors/errors.go +++ b/cluster-autoscaler/vendor/k8s.io/cri-api/pkg/errors/errors.go @@ -17,10 +17,20 @@ limitations under the License. package errors import ( + "errors" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) +var ( + // ErrRegistryUnavailable - Get http error on the PullImage RPC call. + ErrRegistryUnavailable = errors.New("RegistryUnavailable") + + // ErrSignatureValidationFailed - Unable to validate the image signature on the PullImage RPC call. + ErrSignatureValidationFailed = errors.New("SignatureValidationFailed") +) + // IsNotFound returns a boolean indicating whether the error // is grpc not found error. // See https://github.com/grpc/grpc/blob/master/doc/statuscodes.md diff --git a/cluster-autoscaler/vendor/k8s.io/klog/v2/format.go b/cluster-autoscaler/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 000000000000..63995ca6dbf4 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/go-logr/logr"
+)
+
+// Format wraps a value of an arbitrary type and implements fmt.Stringer and
+// logr.Marshaler for it. Stringer returns pretty-printed JSON. MarshalLog
+// returns the original value with a type that has no special methods, in
+// particular no MarshalLog or MarshalJSON.
+//
+// Wrapping values like that is useful when the value has a broken
+// implementation of these special functions (for example, a type which
+// inherits String from TypeMeta, but then doesn't re-implement String) or the
+// implementation produces output that is less readable or unstructured (for
+// example, the generated String functions for Kubernetes API types).
+func Format(obj interface{}) interface{} {
+	return formatAny{Object: obj}
+}
+
+type formatAny struct {
+	Object interface{}
+}
+
+func (f formatAny) String() string {
+	var buffer strings.Builder
+	encoder := json.NewEncoder(&buffer)
+	encoder.SetIndent("", "  ")
+	if err := encoder.Encode(&f.Object); err != nil {
+		return fmt.Sprintf("error marshaling %T to JSON: %v", f, err)
+	}
+	return buffer.String()
+}
+
+func (f formatAny) MarshalLog() interface{} {
+	// Returning a pointer to a pointer ensures that zapr doesn't find a
+	// fmt.Stringer or logr.Marshaler when it checks the type of the
+	// value. It then falls back to reflection, which dumps the value being
+	// pointed to (JSON doesn't have pointers).
+	ptr := &f.Object
+	return &ptr
+}
+
+var _ fmt.Stringer = formatAny{}
+var _ logr.Marshaler = formatAny{}
diff --git a/cluster-autoscaler/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/cluster-autoscaler/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
index 1dc81a15fa62..bcdf5f8ee12d 100644
--- a/cluster-autoscaler/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
+++ b/cluster-autoscaler/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
@@ -18,6 +18,7 @@ package serialize
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"strconv"
 
@@ -196,11 +197,11 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
 	case textWriter:
 		writeTextWriterValue(b, v)
 	case fmt.Stringer:
-		writeStringValue(b, true, StringerToString(v))
+		writeStringValue(b, StringerToString(v))
 	case string:
-		writeStringValue(b, true, v)
+		writeStringValue(b, v)
 	case error:
-		writeStringValue(b, true, ErrorToString(v))
+		writeStringValue(b, ErrorToString(v))
 	case logr.Marshaler:
 		value := MarshalerToValue(v)
 		// A marshaler that returns a string is useful for
@@ -215,9 +216,9 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
 		// value directly.
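		// As an illustrative sketch (the "pod" value is hypothetical), a call
		// like klog.InfoS("updating", "pod", klog.Format(pod)) lands in this
		// case: Format's MarshalLog returns a value without special methods,
		// so it falls through to the default branch below and is JSON-encoded
		// by formatAny rather than printed with %+v.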
 		switch value := value.(type) {
 		case string:
-			writeStringValue(b, true, value)
+			writeStringValue(b, value)
 		default:
-			writeStringValue(b, false, f.AnyToString(value))
+			f.formatAny(b, value)
 		}
 	case []byte:
 		// In https://github.com/kubernetes/klog/pull/237 it was decided
@@ -234,7 +235,7 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
 		b.WriteByte('=')
 		b.WriteString(fmt.Sprintf("%+q", v))
 	default:
-		writeStringValue(b, false, f.AnyToString(v))
+		f.formatAny(b, v)
 	}
 }
 
@@ -242,12 +243,25 @@ func KVFormat(b *bytes.Buffer, k, v interface{}) {
 	Formatter{}.KVFormat(b, k, v)
 }
 
-// AnyToString is the historic fallback formatter.
-func (f Formatter) AnyToString(v interface{}) string {
+// formatAny is the fallback formatter for a value. It supports a hook (for
+// example, for YAML encoding) and itself uses JSON encoding.
+func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) {
+	b.WriteRune('=')
 	if f.AnyToStringHook != nil {
-		return f.AnyToStringHook(v)
+		b.WriteString(f.AnyToStringHook(v))
+		return
+	}
+	encoder := json.NewEncoder(b)
+	l := b.Len()
+	if err := encoder.Encode(v); err != nil {
+		// This shouldn't happen. We discard whatever the encoder
+		// wrote and instead dump an error string.
+		b.Truncate(l)
+		b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err))
+		return
 	}
-	return fmt.Sprintf("%+v", v)
+	// Remove trailing newline.
+	b.Truncate(b.Len() - 1)
 }
 
 // StringerToString converts a Stringer to a string,
@@ -287,7 +301,7 @@ func ErrorToString(err error) (ret string) {
 }
 
 func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
-	b.WriteRune('=')
+	b.WriteByte('=')
 	defer func() {
 		if err := recover(); err != nil {
 			fmt.Fprintf(b, `"<panic: %s>"`, err)
 		}
@@ -296,18 +310,13 @@ func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
 	v.WriteText(b)
 }
 
-func writeStringValue(b *bytes.Buffer, quote bool, v string) {
+func writeStringValue(b *bytes.Buffer, v string) {
 	data := []byte(v)
 	index := bytes.IndexByte(data, '\n')
 	if index == -1 {
 		b.WriteByte('=')
-		if quote {
-			// Simple string, quote quotation marks and non-printable characters.
-			b.WriteString(strconv.Quote(v))
-			return
-		}
-		// Non-string with no line breaks.
-		b.WriteString(v)
+		// Simple string, quote quotation marks and non-printable characters.
+		b.WriteString(strconv.Quote(v))
 		return
 	}
diff --git a/cluster-autoscaler/vendor/k8s.io/klog/v2/k8s_references.go b/cluster-autoscaler/vendor/k8s.io/klog/v2/k8s_references.go
index ecd3f8b69033..786af74bfd38 100644
--- a/cluster-autoscaler/vendor/k8s.io/klog/v2/k8s_references.go
+++ b/cluster-autoscaler/vendor/k8s.io/klog/v2/k8s_references.go
@@ -178,14 +178,14 @@ func (ks kobjSlice) process() (objs []interface{}, err string) {
 	return objectRefs, ""
 }
 
-var nilToken = []byte("<nil>")
+var nilToken = []byte("null")
 
 func (ks kobjSlice) WriteText(out *bytes.Buffer) {
 	s := reflect.ValueOf(ks.arg)
 	switch s.Kind() {
 	case reflect.Invalid:
-		// nil parameter, print as empty slice.
-		out.WriteString("[]")
+		// nil parameter, print as null.
+		out.Write(nilToken)
 		return
 	case reflect.Slice:
 		// Okay, handle below.
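// A sketch of the effect on the text output (the namespace/name values are
// invented for illustration): a nil slice now renders as null instead of [],
// and slice items below come out quoted and comma-separated, e.g.
// ["kube-system/kube-dns","default/curl"] rather than the previous
// [kube-system/kube-dns default/curl], matching the JSON-like style above.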
@@ -197,15 +197,15 @@ func (ks kobjSlice) WriteText(out *bytes.Buffer) {
 	defer out.Write([]byte{']'})
 	for i := 0; i < s.Len(); i++ {
 		if i > 0 {
-			out.Write([]byte{' '})
+			out.Write([]byte{','})
 		}
 		item := s.Index(i).Interface()
 		if item == nil {
 			out.Write(nilToken)
 		} else if v, ok := item.(KMetadata); ok {
-			KObj(v).writeUnquoted(out)
+			KObj(v).WriteText(out)
 		} else {
-			fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+			fmt.Fprintf(out, `"<KObjSlice needs a slice of values implementing KMetadata, got type %T>"`, item)
 			return
 		}
 	}
diff --git a/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go b/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go
index 466eeaf265b5..152f8a6bd6d9 100644
--- a/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go
+++ b/cluster-autoscaler/vendor/k8s.io/klog/v2/klog.go
@@ -1228,6 +1228,19 @@ func CopyStandardLogTo(name string) {
 	stdLog.SetOutput(logBridge(sev))
 }
 
+// NewStandardLogger returns a Logger that writes to the klog logs for the
+// named and lower severities.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, NewStandardLogger panics.
+func NewStandardLogger(name string) *stdLog.Logger {
+	sev, ok := severity.ByName(name)
+	if !ok {
+		panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name))
+	}
+	return stdLog.New(logBridge(sev), "", stdLog.Lshortfile)
+}
+
 // logBridge provides the Write method that enables CopyStandardLogTo to connect
 // Go's standard logs to the logs provided by this package.
 type logBridge severity.Severity
diff --git a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.pb.go
index cb746a64c964..1b634f9323e5 100644
--- a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.pb.go
+++ b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.pb.go
@@ -288,7 +288,6 @@ type EncryptResponse struct {
 	// This can be used to inform staleness of data updated via value.Transformer.TransformFromStorage.
 	KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"`
 	// Additional metadata to be stored with the encrypted data.
-	// This metadata can contain the encrypted local KEK that was used to encrypt the DEK.
 	// This data is stored in plaintext in etcd. KMS plugin implementations are responsible for pre-encrypting any sensitive data.
 	Annotations map[string][]byte `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
diff --git a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.proto b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.proto
index 09b52126f2b7..3c7d335e8b66 100644
--- a/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.proto
+++ b/cluster-autoscaler/vendor/k8s.io/kms/apis/v2/api.proto
@@ -73,7 +73,6 @@ message EncryptResponse {
 	// This can be used to inform staleness of data updated via value.Transformer.TransformFromStorage.
 	string key_id = 2;
 	// Additional metadata to be stored with the encrypted data.
-	// This metadata can contain the encrypted local KEK that was used to encrypt the DEK.
 	// This data is stored in plaintext in etcd. KMS plugin implementations are responsible for pre-encrypting any sensitive data.
 	map<string, bytes> annotations = 3;
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/cached/cache.go
index 16e34853af73..76415b7830bf 100644
--- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/cached/cache.go
+++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/cached/cache.go
@@ -19,6 +19,8 @@ limitations under the License.
 // operations are not repeated unnecessarily. The operations can be
 // created as a tree, and replaced dynamically as needed.
 //
+// All the operations in this module are thread-safe.
+//
 // # Dependencies and types of caches
 //
 // This package uses a source/transform/sink model of caches to build
@@ -34,12 +36,6 @@ limitations under the License.
 // replaced with a new one, and saves the previous results in case an
 // error pops up.
 //
-// # Atomicity
-//
-// Most of the operations are not atomic/thread-safe, except for
-// [Replaceable.Replace] which can be performed while the objects
-// are being read.
-//
 // # Etags
 //
 // Etags in this library is a cache version identifier. It doesn't
@@ -54,6 +50,7 @@ package cached
 
 import (
 	"fmt"
+	"sync"
 	"sync/atomic"
 )
 
@@ -100,14 +97,6 @@ type Data[T any] interface {
 	Get() Result[T]
 }
 
-// T is the source type, V is the destination type.
-type merger[K comparable, T, V any] struct {
-	mergeFn      func(map[K]Result[T]) Result[V]
-	caches       map[K]Data[T]
-	cacheResults map[K]Result[T]
-	result       Result[V]
-}
-
 // NewMerger creates a new merge cache, a cache that merges the result
 // of other caches. The function only gets called if any of the
 // dependencies has changed.
 //
@@ -125,27 +114,89 @@ type merger[K comparable, T, V any] struct {
 // function will remerge all the dependencies together every time. Since
 // the list of dependencies is constant, there is no way to save some
 // partial merge information either.
+//
+// Also note that Golang map iteration is not stable. If the mergeFn
+// depends on the order iteration to be stable, it will need to
+// implement its own sorting or iteration order.
 func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] {
-	return &merger[K, T, V]{
+	listCaches := make([]Data[T], 0, len(caches))
+	// maps from index to key
+	indexes := make(map[int]K, len(caches))
+	i := 0
+	for k := range caches {
+		listCaches = append(listCaches, caches[k])
+		indexes[i] = k
+		i++
+	}
+
+	return NewListMerger(func(results []Result[T]) Result[V] {
+		if len(results) != len(indexes) {
+			panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes)))
+		}
+		m := make(map[K]Result[T], len(results))
+		for i := range results {
+			m[indexes[i]] = results[i]
+		}
+		return mergeFn(m)
+	}, listCaches)
+}
+
+type listMerger[T, V any] struct {
+	lock         sync.Mutex
+	mergeFn      func([]Result[T]) Result[V]
+	caches       []Data[T]
+	cacheResults []Result[T]
+	result       Result[V]
+}
+
+// NewListMerger creates a new merge cache that merges the results of
+// other caches in list form. The function only gets called if any of
+// the dependencies has changed.
+//
+// The benefit of ListMerger over the basic Merger is that caches are
+// stored in an ordered list so the order of the cache will be
+// preserved in the order of the results passed to the mergeFn.
+//
+// If any of the dependencies returned an error before, or any of the
+// dependencies returned an error this time, or if the mergeFn failed
+// before, then the function is rerun.
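// A minimal usage sketch, not part of this change (caches a and b are assumed
// to be existing Data[int] values, and NewResultOK/NewResultErr are assumed
// to be this package's Result helpers):
//
//	merged := NewListMerger(func(results []Result[int]) Result[string] {
//		total := 0
//		for _, r := range results {
//			if r.Err != nil {
//				return NewResultErr[string](r.Err)
//			}
//			total += r.Data
//		}
//		etag := fmt.Sprintf("%d", total)
//		return NewResultOK(etag, etag)
//	}, []Data[int]{a, b})
//	_ = merged.Get() // mergeFn runs again only when an etag or error changes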
+//
+// Note that this assumes there is no "partial" merge, the merge
+// function will remerge all the dependencies together every time. Since
+// the list of dependencies is constant, there is no way to save some
+// partial merge information either.
+func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] {
+	return &listMerger[T, V]{
 		mergeFn: mergeFn,
 		caches:  caches,
 	}
 }
 
-func (c *merger[K, T, V]) prepareResults() map[K]Result[T] {
-	cacheResults := make(map[K]Result[T], len(c.caches))
-	for key, cache := range c.caches {
-		cacheResults[key] = cache.Get()
+func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] {
+	cacheResults := make([]Result[T], len(c.caches))
+	ch := make(chan struct {
+		int
+		Result[T]
+	}, len(c.caches))
+	for i := range c.caches {
+		go func(index int) {
+			ch <- struct {
+				int
+				Result[T]
+			}{
+				index,
+				c.caches[index].Get(),
+			}
+		}(i)
+	}
+	for i := 0; i < len(c.caches); i++ {
+		res := <-ch
+		cacheResults[res.int] = res.Result
 	}
 	return cacheResults
 }
 
-// Rerun if:
-// - The last run resulted in an error
-// - Any of the dependency previously returned an error
-// - Any of the dependency just returned an error
-// - Any of the dependency's etag changed
-func (c *merger[K, T, V]) needsRunning(results map[K]Result[T]) bool {
+func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool {
 	if c.cacheResults == nil {
 		return true
 	}
@@ -155,12 +206,8 @@
 	if len(results) != len(c.cacheResults) {
 		panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults)))
 	}
-	for key, oldResult := range c.cacheResults {
-		newResult, ok := results[key]
-		if !ok {
-			panic(fmt.Errorf("unknown cache entry: %v", key))
-		}
-
+	for i, oldResult := range c.cacheResults {
+		newResult := results[i]
 		if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil {
 			return true
 		}
@@ -168,17 +215,17 @@
 	return false
 }
 
-func (c *merger[K, T, V]) Get() Result[V] {
-	cacheResults := c.prepareResults()
-	if c.needsRunning(cacheResults) {
+func (c *listMerger[T, V]) Get() Result[V] {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	cacheResults := c.prepareResultsLocked()
+	if c.needsRunningLocked(cacheResults) {
 		c.cacheResults = cacheResults
 		c.result = c.mergeFn(c.cacheResults)
 	}
 	return c.result
 }
 
-type transformerCacheKeyType struct{}
-
 // NewTransformer creates a new cache that transforms the result of
 // another cache. The transformFn will only be called if the source
 // cache has updated the output, otherwise, the cached result will be
@@ -188,20 +235,17 @@ type transformerCacheKeyType struct{}
 // this time, or if the transformerFn failed before, the function is
 // rerun.
 func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] {
-	return NewMerger(func(caches map[transformerCacheKeyType]Result[T]) Result[V] {
-		cache, ok := caches[transformerCacheKeyType{}]
-		if len(caches) != 1 || !ok {
+	return NewListMerger(func(caches []Result[T]) Result[V] {
+		if len(caches) != 1 {
 			panic(fmt.Errorf("invalid cache for transformer cache: %v", caches))
 		}
-		return transformerFn(cache)
-	}, map[transformerCacheKeyType]Data[T]{
-		{}: source,
-	})
+		return transformerFn(caches[0])
+	}, []Data[T]{source})
 }
 
 // NewSource creates a new cache that generates some data.
This // will always be called since we don't know the origin of the data and -// if it needs to be updated or not. +// if it needs to be updated or not. sourceFn MUST be thread-safe. func NewSource[T any](sourceFn func() Result[T]) Data[T] { c := source[T](sourceFn) return &c @@ -222,25 +266,24 @@ func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { } type static[T any] struct { + once sync.Once fn func() Result[T] - result *Result[T] + result Result[T] } func (c *static[T]) Get() Result[T] { - if c.result == nil { - result := c.fn() - c.result = &result - } - return *c.result + c.once.Do(func() { + c.result = c.fn() + }) + return c.result } -// Replaceable is a cache that carries the result even when the -// cache is replaced. The cache can be replaced atomically (without any -// lock held). This is the type that should typically be stored in +// Replaceable is a cache that carries the result even when the cache is +// replaced. This is the type that should typically be stored in // structs. type Replaceable[T any] struct { cache atomic.Pointer[Data[T]] - result *Result[T] + result atomic.Pointer[Result[T]] } // Get retrieves the data from the underlying source. [Replaceable] @@ -251,14 +294,19 @@ type Replaceable[T any] struct { // failure is returned. func (c *Replaceable[T]) Get() Result[T] { result := (*c.cache.Load()).Get() - if result.Err != nil && c.result != nil && c.result.Err == nil { - return *c.result + + for { + cResult := c.result.Load() + if result.Err != nil && cResult != nil && cResult.Err == nil { + return *cResult + } + if c.result.CompareAndSwap(cResult, &result) { + return result + } } - c.result = &result - return *c.result } -// Replace changes the cache in a thread-safe way. +// Replace changes the cache. func (c *Replaceable[T]) Replace(cache Data[T]) { c.cache.Swap(&cache) } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 84e902646537..2249a6737ac2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -22,12 +22,13 @@ import ( "fmt" "net/http" "strconv" + "sync" "time" "github.com/NYTimes/gziphandler" "github.com/emicklei/go-restful/v3" "github.com/golang/protobuf/proto" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" "github.com/google/uuid" "github.com/munnerz/goautoneg" klog "k8s.io/klog/v2" @@ -98,16 +99,6 @@ func NewOpenAPIServiceLazy(swagger cached.Data[*spec.Swagger]) *OpenAPIService { return o } -func (o *OpenAPIService) getSwaggerBytes() (timedSpec, string, error) { - result := o.jsonCache.Get() - return result.Data, result.Etag, result.Err -} - -func (o *OpenAPIService) getSwaggerPbBytes() (timedSpec, string, error) { - result := o.protoCache.Get() - return result.Data, result.Etag, result.Err -} - func (o *OpenAPIService) UpdateSpec(swagger *spec.Swagger) error { o.UpdateSpecLazy(cached.NewResultOK(swagger, uuid.New().String())) return nil @@ -128,13 +119,17 @@ func ToProtoBinary(json []byte) ([]byte, error) { // RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. // // Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead. 
-func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) { +func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) *OpenAPIService { o := NewOpenAPIService(spec) - return o, o.RegisterOpenAPIVersionedService(servePath, handler) + o.RegisterOpenAPIVersionedService(servePath, handler) + return o } // RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. -func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error { +func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) { + // Mutex protects the cache chain + var mutex sync.Mutex + accepted := []struct { Type string SubType string @@ -163,7 +158,9 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl continue } // serve the first matching media type in the sorted clause list + mutex.Lock() result := accepts.GetDataAndEtag.Get() + mutex.Unlock() if result.Err != nil { klog.Errorf("Error in OpenAPI handler: %s", result.Err) // only return a 503 if we have no older cache data to serve @@ -187,8 +184,6 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl return }), )) - - return nil } // BuildAndRegisterOpenAPIVersionedService builds the spec and registers a handler to provide access to it. @@ -207,5 +202,6 @@ func BuildAndRegisterOpenAPIVersionedServiceFromRoutes(servePath string, routeCo return nil, err } o := NewOpenAPIService(spec) - return o, o.RegisterOpenAPIVersionedService(servePath, handler) + o.RegisterOpenAPIVersionedService(servePath, handler) + return o, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index 66b7a68da671..2263e2f32b75 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -30,7 +30,7 @@ import ( "time" "github.com/golang/protobuf/proto" - openapi_v3 "github.com/google/gnostic/openapiv3" + openapi_v3 "github.com/google/gnostic-models/openapiv3" "github.com/google/uuid" "github.com/munnerz/goautoneg" "k8s.io/klog/v2" diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 763923dfffcb..5789e67ab7d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" "gopkg.in/yaml.v2" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index 519dcf2ebaee..d9f2896e3532 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -21,7 +21,7 @@ import ( "reflect" "strings" - openapi_v3 "github.com/google/gnostic/openapiv3" + openapi_v3 "github.com/google/gnostic-models/openapiv3" "gopkg.in/yaml.v3" ) diff --git 
a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go index 406a09d9d1e0..6a77f2ac8229 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go @@ -21,7 +21,7 @@ import ( "strconv" "github.com/go-openapi/jsonreference" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" ) // Interfaces diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go index 75c50053b1b8..c85067a26340 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go @@ -16,13 +16,10 @@ package strfmt import ( "encoding" - "fmt" "reflect" "strings" "sync" - "time" - "github.com/mitchellh/mapstructure" "k8s.io/kube-openapi/pkg/validation/errors" ) @@ -50,7 +47,6 @@ type Registry interface { ContainsName(string) bool Validates(string, string) bool Parse(string, string) (interface{}, error) - MapStructureHookFunc() mapstructure.DecodeHookFunc } type knownFormat struct { @@ -92,83 +88,6 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { } } -// MapStructureHookFunc is a decode hook function for mapstructure -func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { - return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { - if from.Kind() != reflect.String { - return data, nil - } - for _, v := range f.data { - tpe, _ := f.GetType(v.Name) - if to == tpe { - switch v.Name { - case "date": - d, err := time.Parse(RFC3339FullDate, data.(string)) - if err != nil { - return nil, err - } - return Date(d), nil - case "datetime": - input := data.(string) - if len(input) == 0 { - return nil, fmt.Errorf("empty string is an invalid datetime format") - } - return ParseDateTime(input) - case "duration": - dur, err := ParseDuration(data.(string)) - if err != nil { - return nil, err - } - return Duration(dur), nil - case "uri": - return URI(data.(string)), nil - case "email": - return Email(data.(string)), nil - case "uuid": - return UUID(data.(string)), nil - case "uuid3": - return UUID3(data.(string)), nil - case "uuid4": - return UUID4(data.(string)), nil - case "uuid5": - return UUID5(data.(string)), nil - case "hostname": - return Hostname(data.(string)), nil - case "ipv4": - return IPv4(data.(string)), nil - case "ipv6": - return IPv6(data.(string)), nil - case "cidr": - return CIDR(data.(string)), nil - case "mac": - return MAC(data.(string)), nil - case "isbn": - return ISBN(data.(string)), nil - case "isbn10": - return ISBN10(data.(string)), nil - case "isbn13": - return ISBN13(data.(string)), nil - case "creditcard": - return CreditCard(data.(string)), nil - case "ssn": - return SSN(data.(string)), nil - case "hexcolor": - return HexColor(data.(string)), nil - case "rgbcolor": - return RGBColor(data.(string)), nil - case "byte": - return Base64(data.(string)), nil - case "password": - return Password(data.(string)), nil - default: - return nil, errors.InvalidTypeName(v.Name) - } - } - } - return data, nil - } -} - // Add adds a new format, return true if this was a new item instead of a replacement func (f 
*defaultFormats) Add(name string, strfmt Format, validator Validator) bool { f.Lock() diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go deleted file mode 100644 index cb62b1cc1343..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=kubeproxy.config.k8s.io - -package v1alpha1 // import "k8s.io/kube-proxy/config/v1alpha1" diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go deleted file mode 100644 index 16ed248fae09..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubeproxy.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, - ) - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/types.go deleted file mode 100644 index 3c6f3ecf6cce..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/types.go +++ /dev/null @@ -1,201 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" -) - -// KubeProxyIPTablesConfiguration contains iptables-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPTablesConfiguration struct { - // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - MasqueradeBit *int32 `json:"masqueradeBit"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool `json:"masqueradeAll"` - // LocalhostNodePorts tells kube-proxy to allow service NodePorts to be accessed via - // localhost (iptables mode only) - LocalhostNodePorts *bool `json:"localhostNodePorts"` - // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration `json:"syncPeriod"` - // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` -} - -// KubeProxyIPVSConfiguration contains ipvs-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPVSConfiguration struct { - // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration `json:"syncPeriod"` - // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` - // ipvs scheduler - Scheduler string `json:"scheduler"` - // excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch - // when cleaning up ipvs services. - ExcludeCIDRs []string `json:"excludeCIDRs"` - // strict ARP configure arp_ignore and arp_announce to avoid answering ARP queries - // from kube-ipvs0 interface - StrictARP bool `json:"strictARP"` - // tcpTimeout is the timeout value used for idle IPVS TCP sessions. - // The default value is 0, which preserves the current timeout value on the system. - TCPTimeout metav1.Duration `json:"tcpTimeout"` - // tcpFinTimeout is the timeout value used for IPVS TCP sessions after receiving a FIN. - // The default value is 0, which preserves the current timeout value on the system. - TCPFinTimeout metav1.Duration `json:"tcpFinTimeout"` - // udpTimeout is the timeout value used for IPVS UDP packets. - // The default value is 0, which preserves the current timeout value on the system. - UDPTimeout metav1.Duration `json:"udpTimeout"` -} - -// KubeProxyConntrackConfiguration contains conntrack settings for -// the Kubernetes proxy server. -type KubeProxyConntrackConfiguration struct { - // maxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave the limit as-is and ignore min). - MaxPerCore *int32 `json:"maxPerCore"` - // min is the minimum value of connect-tracking records to allocate, - // regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is). 
- Min *int32 `json:"min"` - // tcpEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '2s'). Must be greater than 0 to set. - TCPEstablishedTimeout *metav1.Duration `json:"tcpEstablishedTimeout"` - // tcpCloseWaitTimeout is how long an idle conntrack entry - // in CLOSE_WAIT state will remain in the conntrack - // table. (e.g. '60s'). Must be greater than 0 to set. - TCPCloseWaitTimeout *metav1.Duration `json:"tcpCloseWaitTimeout"` -} - -// KubeProxyWinkernelConfiguration contains Windows/HNS settings for -// the Kubernetes proxy server. -type KubeProxyWinkernelConfiguration struct { - // networkName is the name of the network kube-proxy will use - // to create endpoints and policies - NetworkName string `json:"networkName"` - // sourceVip is the IP address of the source VIP endoint used for - // NAT when loadbalancing - SourceVip string `json:"sourceVip"` - // enableDSR tells kube-proxy whether HNS policies should be created - // with DSR - EnableDSR bool `json:"enableDSR"` - // RootHnsEndpointName is the name of hnsendpoint that is attached to - // l2bridge for root network namespace - RootHnsEndpointName string `json:"rootHnsEndpointName"` - // ForwardHealthCheckVip forwards service VIP for health check port on - // Windows - ForwardHealthCheckVip bool `json:"forwardHealthCheckVip"` -} - -// DetectLocalConfiguration contains optional settings related to DetectLocalMode option -type DetectLocalConfiguration struct { - // BridgeInterface is a string argument which represents a single bridge interface name. - // Kube-proxy considers traffic as local if originating from this given bridge. - // This argument should be set if DetectLocalMode is set to LocalModeBridgeInterface. - BridgeInterface string `json:"bridgeInterface"` - // InterfaceNamePrefix is a string argument which represents a single interface prefix name. - // Kube-proxy considers traffic as local if originating from one or more interfaces which match - // the given prefix. This argument should be set if DetectLocalMode is set to LocalModeInterfaceNamePrefix. - InterfaceNamePrefix string `json:"interfaceNamePrefix"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeProxyConfiguration contains everything necessary to configure the -// Kubernetes proxy server. -type KubeProxyConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. - FeatureGates map[string]bool `json:"featureGates,omitempty"` - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string `json:"bindAddress"` - // healthzBindAddress is the IP address and port for the health check server to serve on, - // defaulting to 0.0.0.0:10256 - HealthzBindAddress string `json:"healthzBindAddress"` - // metricsBindAddress is the IP address and port for the metrics server to serve on, - // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) - MetricsBindAddress string `json:"metricsBindAddress"` - // bindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit - BindAddressHardFail bool `json:"bindAddressHardFail"` - // enableProfiling enables profiling via web interface on /debug/pprof handler. - // Profiling handlers will be handled by metrics server. - EnableProfiling bool `json:"enableProfiling"` - // clusterCIDR is the CIDR range of the pods in the cluster. 
It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // clientConnection specifies the kubeconfig file and client connection settings for the proxy - // server to use when communicating with the apiserver. - ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` - // iptables contains iptables-related configuration options. - IPTables KubeProxyIPTablesConfiguration `json:"iptables"` - // ipvs contains ipvs-related configuration options. - IPVS KubeProxyIPVSConfiguration `json:"ipvs"` - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` - // conntrack contains conntrack-related configuration options. - Conntrack KubeProxyConntrackConfiguration `json:"conntrack"` - // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater - // than 0. - ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"` - // nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid - // IP blocks. These values are as a parameter to select the interfaces where nodeport works. - // In case someone would like to expose a service on localhost for local visit and some other interfaces for - // particular purpose, a list of IP blocks would do that. - // If set it to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort. - // If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. - // An empty string slice is meant to select all network interfaces. - NodePortAddresses []string `json:"nodePortAddresses"` - // winkernel contains winkernel-related configuration options. - Winkernel KubeProxyWinkernelConfiguration `json:"winkernel"` - // ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics. - ShowHiddenMetricsForVersion string `json:"showHiddenMetricsForVersion"` - // DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR - DetectLocalMode LocalMode `json:"detectLocalMode"` - // DetectLocal contains optional configuration settings related to DetectLocalMode. - DetectLocal DetectLocalConfiguration `json:"detectLocal"` -} - -// ProxyMode represents modes used by the Kubernetes proxy server. -// -// Currently, two modes of proxy are available on Linux platforms: 'iptables' and 'ipvs'. -// One mode of proxy is available on Windows platforms: 'kernelspace'. -// -// If the proxy mode is unspecified, the best-available proxy mode will be used (currently this -// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be -// used (due to lack of kernel support, missing userspace components, etc) then kube-proxy -// will exit with an error. 
-type ProxyMode string - -// LocalMode represents modes to detect local traffic from the node -type LocalMode string diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 5ecdfb06c68e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,198 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DetectLocalConfiguration) DeepCopyInto(out *DetectLocalConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectLocalConfiguration. -func (in *DetectLocalConfiguration) DeepCopy() *DetectLocalConfiguration { - if in == nil { - return nil - } - out := new(DetectLocalConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.FeatureGates != nil { - in, out := &in.FeatureGates, &out.FeatureGates - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.ClientConnection = in.ClientConnection - in.IPTables.DeepCopyInto(&out.IPTables) - in.IPVS.DeepCopyInto(&out.IPVS) - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = **in - } - in.Conntrack.DeepCopyInto(&out.Conntrack) - out.ConfigSyncPeriod = in.ConfigSyncPeriod - if in.NodePortAddresses != nil { - in, out := &in.NodePortAddresses, &out.NodePortAddresses - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.Winkernel = in.Winkernel - out.DetectLocal = in.DetectLocal - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. -func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { - *out = *in - if in.MaxPerCore != nil { - in, out := &in.MaxPerCore, &out.MaxPerCore - *out = new(int32) - **out = **in - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(int32) - **out = **in - } - if in.TCPEstablishedTimeout != nil { - in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout - *out = new(v1.Duration) - **out = **in - } - if in.TCPCloseWaitTimeout != nil { - in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout - *out = new(v1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. -func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyConntrackConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { - *out = *in - if in.MasqueradeBit != nil { - in, out := &in.MasqueradeBit, &out.MasqueradeBit - *out = new(int32) - **out = **in - } - if in.LocalhostNodePorts != nil { - in, out := &in.LocalhostNodePorts, &out.LocalhostNodePorts - *out = new(bool) - **out = **in - } - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. -func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyIPTablesConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { - *out = *in - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - if in.ExcludeCIDRs != nil { - in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.TCPTimeout = in.TCPTimeout - out.TCPFinTimeout = in.TCPFinTimeout - out.UDPTimeout = in.UDPTimeout - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. -func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyIPVSConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyWinkernelConfiguration) DeepCopyInto(out *KubeProxyWinkernelConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyWinkernelConfiguration. 
-func (in *KubeProxyWinkernelConfiguration) DeepCopy() *KubeProxyWinkernelConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyWinkernelConfiguration) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go index 703516fb78c3..2ee4e4cddd82 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1/types.go @@ -89,6 +89,12 @@ type KubeSchedulerConfiguration struct { // with the extender. These extenders are shared by all scheduler profiles. // +listType=set Extenders []Extender `json:"extenders,omitempty"` + + // DelayCacheUntilActive specifies when to start caching. If this is true and leader election is enabled, + // the scheduler will wait to fill informer caches until it is the leader. Doing so will have slower + // failover with the benefit of lower memory overhead while waiting to become leader. + // Defaults to false. + DelayCacheUntilActive bool `json:"delayCacheUntilActive,omitempty"` } // DecodeNestedObjects decodes plugin args for known types. diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/doc.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/doc.go deleted file mode 100644 index c9f5f62ed8ee..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=kubescheduler.config.k8s.io - -package v1beta2 // import "k8s.io/kube-scheduler/config/v1beta2" diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/register.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/register.go deleted file mode 100644 index 59fc014a9306..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
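The kube-scheduler config/v1 hunk above is the one substantive addition in this stretch of the patch: the new DelayCacheUntilActive field. A sketch of a configuration that opts in, assuming leader election is enabled; the componentbaseconfigv1alpha1 wiring reflects the LeaderElection field on the same struct, while the rest of the program is illustrative:

package main

import (
    "fmt"

    componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1"
    v1 "k8s.io/kube-scheduler/config/v1"
)

func main() {
    leaderElect := true
    cfg := v1.KubeSchedulerConfiguration{
        LeaderElection: componentbaseconfigv1alpha1.LeaderElectionConfiguration{
            LeaderElect: &leaderElect,
        },
        // Wait to fill informer caches until elected leader: slower failover,
        // but lower memory overhead while standing by. Defaults to false.
        DelayCacheUntilActive: true,
    }
    fmt.Println(cfg.DelayCacheUntilActive)
}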
-*/ - -package v1beta2 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubescheduler.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeSchedulerConfiguration{}, - &DefaultPreemptionArgs{}, - &InterPodAffinityArgs{}, - &NodeResourcesBalancedAllocationArgs{}, - &NodeResourcesFitArgs{}, - &PodTopologySpreadArgs{}, - &VolumeBindingArgs{}, - &NodeAffinityArgs{}, - ) - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types.go deleted file mode 100644 index 0e47967adb4b..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types.go +++ /dev/null @@ -1,369 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "bytes" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "sigs.k8s.io/yaml" -) - -const ( - // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") - SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem - - // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") - SchedulerDefaultLockObjectName = "kube-scheduler" - - // SchedulerDefaultProviderName defines the default provider names - SchedulerDefaultProviderName = "DefaultProvider" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeSchedulerConfiguration configures a scheduler -type KubeSchedulerConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // Parallelism defines the amount of parallelism in algorithms for scheduling a Pods. Must be greater than 0. Defaults to 16 - Parallelism *int32 `json:"parallelism,omitempty"` - - // LeaderElection defines the configuration of leader election client. - LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:"leaderElection"` - - // ClientConnection specifies the kubeconfig file and client connection - // settings for the proxy server to use when communicating with the apiserver. 
- ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` - - // Note: Both HealthzBindAddress and MetricsBindAddress fields are deprecated. - // Only empty address or port 0 is allowed. Anything else will fail validation. - // HealthzBindAddress is the IP address and port for the health check server to serve on. - HealthzBindAddress *string `json:"healthzBindAddress,omitempty"` - // MetricsBindAddress is the IP address and port for the metrics server to serve on. - MetricsBindAddress *string `json:"metricsBindAddress,omitempty"` - - // DebuggingConfiguration holds configuration for Debugging related features - // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration - componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. - PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` - - // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. - // If specified, it must be greater than 0. If this value is null, the default value (1s) - // will be used. - PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"` - - // PodMaxBackoffSeconds is the max backoff for unschedulable pods. - // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, - // the default value (10s) will be used. - PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"` - - // Profiles are scheduling profiles that kube-scheduler supports. Pods can - // choose to be scheduled under a particular profile by setting its associated - // scheduler name. Pods that don't specify any scheduler name are scheduled - // with the "default-scheduler" profile, if present here. - // +listType=map - // +listMapKey=schedulerName - Profiles []KubeSchedulerProfile `json:"profiles,omitempty"` - - // Extenders are the list of scheduler extenders, each holding the values of how to communicate - // with the extender. These extenders are shared by all scheduler profiles. - // +listType=set - Extenders []Extender `json:"extenders,omitempty"` -} - -// DecodeNestedObjects decodes plugin args for known types. -func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error { - var strictDecodingErrs []error - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].decodeNestedObjects(d) - if err != nil { - decodingErr := fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - if runtime.IsStrictDecodingError(err) { - strictDecodingErrs = append(strictDecodingErrs, decodingErr) - } else { - return decodingErr - } - } - } - } - if len(strictDecodingErrs) > 0 { - return runtime.NewStrictDecodingError(strictDecodingErrs) - } - return nil -} - -// EncodeNestedObjects encodes plugin args. 
-func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error { - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].encodeNestedObjects(e) - if err != nil { - return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - } - } - } - return nil -} - -// KubeSchedulerProfile is a scheduling profile. -type KubeSchedulerProfile struct { - // SchedulerName is the name of the scheduler associated to this profile. - // If SchedulerName matches with the pod's "spec.schedulerName", then the pod - // is scheduled with this profile. - SchedulerName *string `json:"schedulerName,omitempty"` - - // Plugins specify the set of plugins that should be enabled or disabled. - // Enabled plugins are the ones that should be enabled in addition to the - // default plugins. Disabled plugins are any of the default plugins that - // should be disabled. - // When no enabled or disabled plugin is specified for an extension point, - // default plugins for that extension point will be used if there is any. - // If a QueueSort plugin is specified, the same QueueSort Plugin and - // PluginConfig must be specified for all profiles. - Plugins *Plugins `json:"plugins,omitempty"` - - // PluginConfig is an optional set of custom plugin arguments for each plugin. - // Omitting config args for a plugin is equivalent to using the default config - // for that plugin. - // +listType=map - // +listMapKey=name - PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` -} - -// Plugins include multiple extension points. When specified, the list of plugins for -// a particular extension point are the only ones enabled. If an extension point is -// omitted from the config, then the default set of plugins is used for that extension point. -// Enabled plugins are called in the order specified here, after default plugins. If they need to -// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. -type Plugins struct { - // PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue. - PreEnqueue PluginSet `json:"preEnqueue,omitempty"` - - // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. - QueueSort PluginSet `json:"queueSort,omitempty"` - - // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. - PreFilter PluginSet `json:"preFilter,omitempty"` - - // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. - Filter PluginSet `json:"filter,omitempty"` - - // PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod. - PostFilter PluginSet `json:"postFilter,omitempty"` - - // PreScore is a list of plugins that are invoked before scoring. - PreScore PluginSet `json:"preScore,omitempty"` - - // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. - Score PluginSet `json:"score,omitempty"` - - // Reserve is a list of plugins invoked when reserving/unreserving resources - // after a node is assigned to run the pod. - Reserve PluginSet `json:"reserve,omitempty"` - - // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. 
- Permit PluginSet `json:"permit,omitempty"` - - // PreBind is a list of plugins that should be invoked before a pod is bound. - PreBind PluginSet `json:"preBind,omitempty"` - - // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. - // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. - Bind PluginSet `json:"bind,omitempty"` - - // PostBind is a list of plugins that should be invoked after a pod is successfully bound. - PostBind PluginSet `json:"postBind,omitempty"` - - // MultiPoint is a simplified config section to enable plugins for all valid extension points. - MultiPoint PluginSet `json:"multiPoint,omitempty"` -} - -// PluginSet specifies enabled and disabled plugins for an extension point. -// If an array is empty, missing, or nil, default plugins at that extension point will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled in addition to default plugins. - // If the default plugin is also configured in the scheduler config file, the weight of plugin will - // be overridden accordingly. - // These are called after default plugins and in the same order specified here. - // +listType=atomic - Enabled []Plugin `json:"enabled,omitempty"` - // Disabled specifies default plugins that should be disabled. - // When all default plugins need to be disabled, an array containing only one "*" should be provided. - // +listType=map - // +listMapKey=name - Disabled []Plugin `json:"disabled,omitempty"` -} - -// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. -type Plugin struct { - // Name defines the name of plugin - Name string `json:"name"` - // Weight defines the weight of plugin, only used for Score plugins. - Weight *int32 `json:"weight,omitempty"` -} - -// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. -// It is up to the plugin to process these Args. -type PluginConfig struct { - // Name defines the name of plugin being configured - Name string `json:"name"` - // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. - Args runtime.RawExtension `json:"args,omitempty"` -} - -func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error { - gvk := SchemeGroupVersion.WithKind(c.Name + "Args") - // dry-run to detect and skip out-of-tree plugin args. 
- if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) { - return nil - } - - var strictDecodingErr error - obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil) - if err != nil { - decodingArgsErr := fmt.Errorf("decoding args for plugin %s: %w", c.Name, err) - if obj != nil && runtime.IsStrictDecodingError(err) { - strictDecodingErr = runtime.NewStrictDecodingError([]error{decodingArgsErr}) - } else { - return decodingArgsErr - } - } - if parsedGvk.GroupKind() != gvk.GroupKind() { - return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind()) - } - c.Args.Object = obj - return strictDecodingErr -} - -func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error { - if c.Args.Object == nil { - return nil - } - var buf bytes.Buffer - err := e.Encode(c.Args.Object, &buf) - if err != nil { - return err - } - // The encoder might be a YAML encoder, but the parent encoder expects - // JSON output, so we convert YAML back to JSON. - // This is a no-op if produces JSON. - json, err := yaml.YAMLToJSON(buf.Bytes()) - if err != nil { - return err - } - c.Args.Raw = json - return nil -} - -// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -// it is assumed that the extender chose not to provide that extension. -type Extender struct { - // URLPrefix at which the extender is available - URLPrefix string `json:"urlPrefix"` - // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. - FilterVerb string `json:"filterVerb,omitempty"` - // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. - PreemptVerb string `json:"preemptVerb,omitempty"` - // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. - PrioritizeVerb string `json:"prioritizeVerb,omitempty"` - // The numeric multiplier for the node scores that the prioritize call generates. - // The weight should be a positive integer - Weight int64 `json:"weight,omitempty"` - // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. - // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender - // can implement this function. - BindVerb string `json:"bindVerb,omitempty"` - // EnableHTTPS specifies whether https should be used to communicate with the extender - EnableHTTPS bool `json:"enableHTTPS,omitempty"` - // TLSConfig specifies the transport layer security config - TLSConfig *ExtenderTLSConfig `json:"tlsConfig,omitempty"` - // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize - // timeout is ignored, k8s/other extenders priorities are used to select the node. - HTTPTimeout metav1.Duration `json:"httpTimeout,omitempty"` - // NodeCacheCapable specifies that the extender is capable of caching node information, - // so the scheduler should only send minimal information about the eligible nodes - // assuming that the extender already cached full details of all nodes in the cluster - NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"` - // ManagedResources is a list of extended resources that are managed by - // this extender. 
- // - A pod will be sent to the extender on the Filter, Prioritize and Bind - // (if the extender is the binder) phases iff the pod requests at least - // one of the extended resources in this list. If empty or unspecified, - // all pods will be sent to this extender. - // - If IgnoredByScheduler is set to true for a resource, kube-scheduler - // will skip checking the resource in predicates. - // +optional - // +listType=atomic - ManagedResources []ExtenderManagedResource `json:"managedResources,omitempty"` - // Ignorable specifies if the extender is ignorable, i.e. scheduling should not - // fail when the extender returns an error or is not reachable. - Ignorable bool `json:"ignorable,omitempty"` -} - -// ExtenderManagedResource describes the arguments of extended resources -// managed by an extender. -type ExtenderManagedResource struct { - // Name is the extended resource name. - Name string `json:"name"` - // IgnoredByScheduler indicates whether kube-scheduler should ignore this - // resource when applying predicates. - IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"` -} - -// ExtenderTLSConfig contains settings to enable TLS with extender -type ExtenderTLSConfig struct { - // Server should be accessed without verifying the TLS certificate. For testing only. - Insecure bool `json:"insecure,omitempty"` - // ServerName is passed to the server for SNI and is used in the client to check server - // certificates against. If ServerName is empty, the hostname used to contact the - // server is used. - ServerName string `json:"serverName,omitempty"` - - // Server requires TLS client certificate authentication - CertFile string `json:"certFile,omitempty"` - // Server requires TLS client certificate authentication - KeyFile string `json:"keyFile,omitempty"` - // Trusted root certificates for server - CAFile string `json:"caFile,omitempty"` - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte `json:"certData,omitempty"` - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte `json:"keyData,omitempty"` - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte `json:"caData,omitempty"` -} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go deleted file mode 100644 index 602db995df2d..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go +++ /dev/null @@ -1,229 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
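Taken together, the v1beta2 types deleted above composed as sketched below. Only names defined in the removed file are used; the profile name, plugin name, and extender URL are hypothetical placeholders:

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    v1beta2 "k8s.io/kube-scheduler/config/v1beta2" // removed by this patch
)

func main() {
    schedulerName := "example-scheduler" // hypothetical
    weight := int32(2)
    cfg := v1beta2.KubeSchedulerConfiguration{
        Profiles: []v1beta2.KubeSchedulerProfile{{
            SchedulerName: &schedulerName,
            Plugins: &v1beta2.Plugins{
                Score: v1beta2.PluginSet{
                    // "*" disables all default Score plugins; Weight applies
                    // only to Score plugins.
                    Disabled: []v1beta2.Plugin{{Name: "*"}},
                    Enabled:  []v1beta2.Plugin{{Name: "ExampleScorer", Weight: &weight}},
                },
            },
        }},
        Extenders: []v1beta2.Extender{{
            URLPrefix:   "https://extender.example.com/scheduler", // placeholder
            FilterVerb:  "filter", // appended to URLPrefix for filter calls
            Weight:      1,
            HTTPTimeout: metav1.Duration{Duration: 30 * time.Second},
            // Scheduling should not fail if this extender is unreachable.
            Ignorable: true,
        }},
    }
    fmt.Println(*cfg.Profiles[0].SchedulerName, cfg.Extenders[0].URLPrefix)
}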
-*/ - -package v1beta2 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DefaultPreemptionArgs holds arguments used to configure the -// DefaultPreemption plugin. -type DefaultPreemptionArgs struct { - metav1.TypeMeta `json:",inline"` - - // MinCandidateNodesPercentage is the minimum number of candidates to - // shortlist when dry running preemption as a percentage of number of nodes. - // Must be in the range [0, 100]. Defaults to 10% of the cluster size if - // unspecified. - MinCandidateNodesPercentage *int32 `json:"minCandidateNodesPercentage,omitempty"` - // MinCandidateNodesAbsolute is the absolute minimum number of candidates to - // shortlist. The likely number of candidates enumerated for dry running - // preemption is given by the formula: - // numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) - // We say "likely" because there are other factors such as PDB violations - // that play a role in the number of candidates shortlisted. Must be at least - // 0 nodes. Defaults to 100 nodes if unspecified. - MinCandidateNodesAbsolute *int32 `json:"minCandidateNodesAbsolute,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. -type InterPodAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // HardPodAffinityWeight is the scoring weight for existing pods with a - // matching hard affinity to the incoming pod. - HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` - - // IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity - // rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities. - IgnorePreferredTermsOfExistingPods bool `json:"ignorePreferredTermsOfExistingPods"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. -type NodeResourcesFitArgs struct { - metav1.TypeMeta `json:",inline"` - - // IgnoredResources is the list of resources that NodeResources fit filter - // should ignore. This doesn't apply to scoring. - // +listType=atomic - IgnoredResources []string `json:"ignoredResources,omitempty"` - // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. - // e.g. if group is ["example.com"], it will ignore all resource names that begin - // with "example.com", such as "example.com/aaa" and "example.com/bbb". - // A resource group name can't contain '/'. This doesn't apply to scoring. - // +listType=atomic - IgnoredResourceGroups []string `json:"ignoredResourceGroups,omitempty"` - - // ScoringStrategy selects the node resource scoring strategy. - // The default strategy is LeastAllocated with an equal "cpu" and "memory" weight. - ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"` -} - -// PodTopologySpreadConstraintsDefaulting defines how to set default constraints -// for the PodTopologySpread plugin. -type PodTopologySpreadConstraintsDefaulting string - -const ( - // SystemDefaulting instructs to use the kubernetes defined default. - SystemDefaulting PodTopologySpreadConstraintsDefaulting = "System" - // ListDefaulting instructs to use the config provided default. 
- ListDefaulting PodTopologySpreadConstraintsDefaulting = "List" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. -type PodTopologySpreadArgs struct { - metav1.TypeMeta `json:",inline"` - - // DefaultConstraints defines topology spread constraints to be applied to - // Pods that don't define any in `pod.spec.topologySpreadConstraints`. - // `.defaultConstraints[*].labelSelectors` must be empty, as they are - // deduced from the Pod's membership to Services, ReplicationControllers, - // ReplicaSets or StatefulSets. - // When not empty, .defaultingType must be "List". - // +optional - // +listType=atomic - DefaultConstraints []corev1.TopologySpreadConstraint `json:"defaultConstraints,omitempty"` - - // DefaultingType determines how .defaultConstraints are deduced. Can be one - // of "System" or "List". - // - // - "System": Use kubernetes defined constraints that spread Pods among - // Nodes and Zones. - // - "List": Use constraints defined in .defaultConstraints. - // - // Defaults to "System". - // +optional - DefaultingType PodTopologySpreadConstraintsDefaulting `json:"defaultingType,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin. -type NodeResourcesBalancedAllocationArgs struct { - metav1.TypeMeta `json:",inline"` - - // Resources to be managed, the default is "cpu" and "memory" if not specified. - // +listType=map - // +listMapKey=name - Resources []ResourceSpec `json:"resources,omitempty"` -} - -// UtilizationShapePoint represents single point of priority function shape. -type UtilizationShapePoint struct { - // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int32 `json:"utilization"` - // Score assigned to given utilization (y axis). Valid values are 0 to 10. - Score int32 `json:"score"` -} - -// ResourceSpec represents a single resource. -type ResourceSpec struct { - // Name of the resource. - Name string `json:"name"` - // Weight of the resource. - Weight int64 `json:"weight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. -type VolumeBindingArgs struct { - metav1.TypeMeta `json:",inline"` - - // BindTimeoutSeconds is the timeout in seconds in volume binding operation. - // Value must be non-negative integer. The value zero indicates no waiting. - // If this value is nil, the default value (600) will be used. - BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` - - // Shape specifies the points defining the score function shape, which is - // used to score nodes based on the utilization of statically provisioned - // PVs. The utilization is calculated by dividing the total requested - // storage of the pod by the total capacity of feasible PVs on each node. - // Each point contains utilization (ranges from 0 to 100) and its - // associated score (ranges from 0 to 10). You can turn the priority by - // specifying different scores for different utilization numbers. - // The default shape points are: - // 1) 0 for 0 utilization - // 2) 10 for 100 utilization - // All points must be sorted in increasing order by utilization. 
- // +featureGate=VolumeCapacityPriority - // +optional - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. -type NodeAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // AddedAffinity is applied to all Pods additionally to the NodeAffinity - // specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity - // AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes - // match). - // When AddedAffinity is used, some Pods with affinity requirements that match - // a specific Node (such as Daemonset Pods) might remain unschedulable. - // +optional - AddedAffinity *corev1.NodeAffinity `json:"addedAffinity,omitempty"` -} - -// ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. -type ScoringStrategyType string - -const ( - // LeastAllocated strategy prioritizes nodes with least allocated resources. - LeastAllocated ScoringStrategyType = "LeastAllocated" - // MostAllocated strategy prioritizes nodes with most allocated resources. - MostAllocated ScoringStrategyType = "MostAllocated" - // RequestedToCapacityRatio strategy allows specifying a custom shape function - // to score nodes based on the request to capacity ratio. - RequestedToCapacityRatio ScoringStrategyType = "RequestedToCapacityRatio" -) - -// ScoringStrategy define ScoringStrategyType for node resource plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType `json:"type,omitempty"` - - // Resources to consider when scoring. - // The default resource set includes "cpu" and "memory" with an equal weight. - // Allowed weights go from 1 to 100. - // Weight defaults to 1 if not specified or explicitly set to 0. - // +listType=map - // +listMapKey=topologyKey - Resources []ResourceSpec `json:"resources,omitempty"` - - // Arguments specific to RequestedToCapacityRatio strategy. - RequestedToCapacityRatio *RequestedToCapacityRatioParam `json:"requestedToCapacityRatio,omitempty"` -} - -// RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters -type RequestedToCapacityRatioParam struct { - // Shape is a list of points defining the scoring function shape. - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index 7ffacf0f3da6..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kube-scheduler/config/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,614 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. 
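The plugin-args file removed above also defined the scoring-strategy types. A sketch of a RequestedToCapacityRatio configuration built from those definitions — the shape points and weights are illustrative; per the deleted comments, utilization ranges over 0-100, scores over 0-10, and points must be sorted by increasing utilization:

package main

import (
    "fmt"

    v1beta2 "k8s.io/kube-scheduler/config/v1beta2" // removed by this patch
)

func main() {
    args := v1beta2.NodeResourcesFitArgs{
        ScoringStrategy: &v1beta2.ScoringStrategy{
            Type: v1beta2.RequestedToCapacityRatio,
            Resources: []v1beta2.ResourceSpec{
                {Name: "cpu", Weight: 1},
                {Name: "memory", Weight: 1},
            },
            RequestedToCapacityRatio: &v1beta2.RequestedToCapacityRatioParam{
                // Score rises linearly from 0 at empty to 10 at full utilization.
                Shape: []v1beta2.UtilizationShapePoint{
                    {Utilization: 0, Score: 0},
                    {Utilization: 100, Score: 10},
                },
            },
        },
    }
    fmt.Println(args.ScoringStrategy.Type)
}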
- -package v1beta2 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.MinCandidateNodesPercentage != nil { - in, out := &in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage - *out = new(int32) - **out = **in - } - if in.MinCandidateNodesAbsolute != nil { - in, out := &in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs. -func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs { - if in == nil { - return nil - } - out := new(DefaultPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Extender) DeepCopyInto(out *Extender) { - *out = *in - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ExtenderTLSConfig) - (*in).DeepCopyInto(*out) - } - out.HTTPTimeout = in.HTTPTimeout - if in.ManagedResources != nil { - in, out := &in.ManagedResources, &out.ManagedResources - *out = make([]ExtenderManagedResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. -func (in *Extender) DeepCopy() *Extender { - if in == nil { - return nil - } - out := new(Extender) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. -func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { - if in == nil { - return nil - } - out := new(ExtenderManagedResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { - *out = *in - if in.CertData != nil { - in, out := &in.CertData, &out.CertData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CAData != nil { - in, out := &in.CAData, &out.CAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. -func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { - if in == nil { - return nil - } - out := new(ExtenderTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.HardPodAffinityWeight != nil { - in, out := &in.HardPodAffinityWeight, &out.HardPodAffinityWeight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. -func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { - if in == nil { - return nil - } - out := new(InterPodAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - in.LeaderElection.DeepCopyInto(&out.LeaderElection) - out.ClientConnection = in.ClientConnection - if in.HealthzBindAddress != nil { - in, out := &in.HealthzBindAddress, &out.HealthzBindAddress - *out = new(string) - **out = **in - } - if in.MetricsBindAddress != nil { - in, out := &in.MetricsBindAddress, &out.MetricsBindAddress - *out = new(string) - **out = **in - } - in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.PodInitialBackoffSeconds != nil { - in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds - *out = new(int64) - **out = **in - } - if in.PodMaxBackoffSeconds != nil { - in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds - *out = new(int64) - **out = **in - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]KubeSchedulerProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Extenders != nil { - in, out := &in.Extenders, &out.Extenders - *out = make([]Extender, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { - if in == nil { - return nil - } - out := new(KubeSchedulerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { - *out = *in - if in.SchedulerName != nil { - in, out := &in.SchedulerName, &out.SchedulerName - *out = new(string) - **out = **in - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(Plugins) - (*in).DeepCopyInto(*out) - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]PluginConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. -func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { - if in == nil { - return nil - } - out := new(KubeSchedulerProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.AddedAffinity != nil { - in, out := &in.AddedAffinity, &out.AddedAffinity - *out = new(v1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs. -func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs { - if in == nil { - return nil - } - out := new(NodeAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs { - if in == nil { - return nil - } - out := new(NodeResourcesBalancedAllocationArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.IgnoredResources != nil { - in, out := &in.IgnoredResources, &out.IgnoredResources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IgnoredResourceGroups != nil { - in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ScoringStrategy != nil { - in, out := &in.ScoringStrategy, &out.ScoringStrategy - *out = new(ScoringStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. 
-func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { - if in == nil { - return nil - } - out := new(NodeResourcesFitArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - if in.Weight != nil { - in, out := &in.Weight, &out.Weight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { - *out = *in - in.Args.DeepCopyInto(&out.Args) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. -func (in *PluginConfig) DeepCopy() *PluginConfig { - if in == nil { - return nil - } - out := new(PluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSet) DeepCopyInto(out *PluginSet) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. -func (in *PluginSet) DeepCopy() *PluginSet { - if in == nil { - return nil - } - out := new(PluginSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugins) DeepCopyInto(out *Plugins) { - *out = *in - in.PreEnqueue.DeepCopyInto(&out.PreEnqueue) - in.QueueSort.DeepCopyInto(&out.QueueSort) - in.PreFilter.DeepCopyInto(&out.PreFilter) - in.Filter.DeepCopyInto(&out.Filter) - in.PostFilter.DeepCopyInto(&out.PostFilter) - in.PreScore.DeepCopyInto(&out.PreScore) - in.Score.DeepCopyInto(&out.Score) - in.Reserve.DeepCopyInto(&out.Reserve) - in.Permit.DeepCopyInto(&out.Permit) - in.PreBind.DeepCopyInto(&out.PreBind) - in.Bind.DeepCopyInto(&out.Bind) - in.PostBind.DeepCopyInto(&out.PostBind) - in.MultiPoint.DeepCopyInto(&out.MultiPoint) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. -func (in *Plugins) DeepCopy() *Plugins { - if in == nil { - return nil - } - out := new(Plugins) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DefaultConstraints != nil { - in, out := &in.DefaultConstraints, &out.DefaultConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. -func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { - if in == nil { - return nil - } - out := new(PodTopologySpreadArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) { - *out = *in - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam. -func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam { - if in == nil { - return nil - } - out := new(RequestedToCapacityRatioParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - if in.RequestedToCapacityRatio != nil { - in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio - *out = new(RequestedToCapacityRatioParam) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy. -func (in *ScoringStrategy) DeepCopy() *ScoringStrategy { - if in == nil { - return nil - } - out := new(ScoringStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. -func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { - if in == nil { - return nil - } - out := new(UtilizationShapePoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.BindTimeoutSeconds != nil {
-		in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.Shape != nil {
-		in, out := &in.Shape, &out.Shape
-		*out = make([]UtilizationShapePoint, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs.
-func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(VolumeBindingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
index 68cafdb1a94b..16f4831cd01d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go
@@ -150,6 +150,7 @@ type KubeletConfiguration struct {
 	// +optional
 	TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"`
 	// tlsCipherSuites is the list of allowed cipher suites for the server.
+	// Note that TLS 1.3 ciphersuites are not configurable.
 	// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
 	// Default: nil
 	// +optional
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/.import-restrictions
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/.import-restrictions
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/.import-restrictions
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/errors.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/errors.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/errors.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/errors.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/constants.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/constants.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/constants.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/constants.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/httpstream.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/httpstream.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/httpstream.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/httpstream.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/portforward.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/portforward.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/portforward.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/portforward.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/websocket.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/websocket.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/websocket.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/portforward/websocket.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/attach.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/attach.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/attach.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/attach.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/doc.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/doc.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/doc.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/doc.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/exec.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/exec.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/exec.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/exec.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/httpstream.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/httpstream.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/httpstream.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/httpstream.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/websocket.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/websocket.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand/websocket.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/remotecommand/websocket.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/request_cache.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/request_cache.go
similarity index 100%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/request_cache.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/request_cache.go
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/server.go b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/server.go
similarity index 98%
rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/server.go
rename to cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/server.go
index 3e989d8aee97..fe5c22b04979 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/server.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubelet/pkg/cri/streaming/server.go
@@ -36,8 +36,8 @@ import (
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/client-go/tools/remotecommand" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" - remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand" + "k8s.io/kubelet/pkg/cri/streaming/portforward" + remotecommandserver "k8s.io/kubelet/pkg/cri/streaming/remotecommand" ) // Server is the library interface to serve the stream requests. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go deleted file mode 100644 index 4ff3c383735a..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package app - -import ( - "errors" - "os" - "strconv" - "strings" - - "k8s.io/klog/v2" - "k8s.io/mount-utils" - - "k8s.io/component-helpers/node/util/sysctl" -) - -// Conntracker is an interface to the global sysctl. Descriptions of the various -// sysctl fields can be found here: -// -// https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt -type Conntracker interface { - // SetMax adjusts nf_conntrack_max. - SetMax(max int) error - // SetTCPEstablishedTimeout adjusts nf_conntrack_tcp_timeout_established. - SetTCPEstablishedTimeout(seconds int) error - // SetTCPCloseWaitTimeout nf_conntrack_tcp_timeout_close_wait. - SetTCPCloseWaitTimeout(seconds int) error -} - -type realConntracker struct{} - -var errReadOnlySysFS = errors.New("readOnlySysFS") - -func (rct realConntracker) SetMax(max int) error { - if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil { - return err - } - klog.InfoS("Setting nf_conntrack_max", "nfConntrackMax", max) - - // Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize - // when the writer process is not in the initial network namespace - // (https://github.com/torvalds/linux/blob/v4.10/net/netfilter/nf_conntrack_core.c#L1795-L1796). - // Usually that's fine. But in some configurations such as with github.com/kinvolk/kubeadm-nspawn, - // kube-proxy is in another netns. - // Therefore, check if writing in hashsize is necessary and skip the writing if not. - hashsize, err := readIntStringFile("/sys/module/nf_conntrack/parameters/hashsize") - if err != nil { - return err - } - if hashsize >= (max / 4) { - return nil - } - - // sysfs is expected to be mounted as 'rw'. However, it may be - // unexpectedly mounted as 'ro' by docker because of a known docker - // issue (https://github.com/docker/docker/issues/24000). Setting - // conntrack will fail when sysfs is readonly. When that happens, we - // don't set conntrack hashsize and return a special error - // errReadOnlySysFS here. The caller should deal with - // errReadOnlySysFS differently. 
-	writable, err := isSysFSWritable()
-	if err != nil {
-		return err
-	}
-	if !writable {
-		return errReadOnlySysFS
-	}
-	// TODO: generify this and sysctl to a new sysfs.WriteInt()
-	klog.InfoS("Setting conntrack hashsize", "conntrackHashsize", max/4)
-	return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4)
-}
-
-func (rct realConntracker) SetTCPEstablishedTimeout(seconds int) error {
-	return rct.setIntSysCtl("nf_conntrack_tcp_timeout_established", seconds)
-}
-
-func (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error {
-	return rct.setIntSysCtl("nf_conntrack_tcp_timeout_close_wait", seconds)
-}
-
-func (realConntracker) setIntSysCtl(name string, value int) error {
-	entry := "net/netfilter/" + name
-
-	sys := sysctl.New()
-	if val, _ := sys.GetSysctl(entry); val != value && val < value {
-		klog.InfoS("Set sysctl", "entry", entry, "value", value)
-		if err := sys.SetSysctl(entry, value); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// isSysFSWritable checks /proc/mounts to see whether sysfs is 'rw' or not.
-func isSysFSWritable() (bool, error) {
-	const permWritable = "rw"
-	const sysfsDevice = "sysfs"
-	m := mount.New("" /* default mount path */)
-	mountPoints, err := m.List()
-	if err != nil {
-		klog.ErrorS(err, "Failed to list mount points")
-		return false, err
-	}
-
-	for _, mountPoint := range mountPoints {
-		if mountPoint.Type != sysfsDevice {
-			continue
-		}
-		// Check whether sysfs is 'rw'
-		if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable {
-			return true, nil
-		}
-		klog.ErrorS(nil, "Sysfs is not writable", "mountPoint", mountPoint, "mountOptions", mountPoint.Opts)
-		return false, errReadOnlySysFS
-	}
-
-	return false, errors.New("no sysfs mounted")
-}
-
-func readIntStringFile(filename string) (int, error) {
-	b, err := os.ReadFile(filename)
-	if err != nil {
-		return -1, err
-	}
-	return strconv.Atoi(strings.TrimSpace(string(b)))
-}
-
-func writeIntStringFile(filename string, value int) error {
-	return os.WriteFile(filename, []byte(strconv.Itoa(value)), 0640)
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/init_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/init_others.go
deleted file mode 100644
index cee69f1a0eb2..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/init_others.go
+++ /dev/null
@@ -1,31 +0,0 @@
-//go:build !windows
-// +build !windows
-
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package app
-
-import (
-	"github.com/spf13/pflag"
-)
-
-func initForOS(service bool) error {
-	return nil
-}
-
-func (o *Options) addOSFlags(fs *pflag.FlagSet) {
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/init_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/init_windows.go
deleted file mode 100644
index 32ed6dc7fe0a..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/init_windows.go
+++ /dev/null
@@ -1,46 +0,0 @@
-//go:build windows
-// +build windows
-
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package app
-
-import (
-	"k8s.io/kubernetes/pkg/windows/service"
-
-	"github.com/spf13/pflag"
-)
-
-const (
-	serviceName = "kube-proxy"
-)
-
-func initForOS(windowsService bool) error {
-	if windowsService {
-		return service.InitService(serviceName)
-	}
-	return nil
-}
-
-func (o *Options) addOSFlags(fs *pflag.FlagSet) {
-	fs.BoolVar(&o.WindowsService, "windows-service", o.WindowsService, "Enable Windows Service Control Manager API integration")
-	fs.StringVar(&o.config.Winkernel.SourceVip, "source-vip", o.config.Winkernel.SourceVip, "The IP address of the source VIP for non-DSR.")
-	fs.StringVar(&o.config.Winkernel.NetworkName, "network-name", o.config.Winkernel.NetworkName, "The name of the cluster network.")
-	fs.BoolVar(&o.config.Winkernel.EnableDSR, "enable-dsr", o.config.Winkernel.EnableDSR, "If true make kube-proxy apply DSR policies for service VIP")
-	fs.StringVar(&o.config.Winkernel.RootHnsEndpointName, "root-hnsendpoint-name", "cbr0", "The name of the hns endpoint name for root namespace attached to l2bridge")
-	fs.BoolVar(&o.config.Winkernel.ForwardHealthCheckVip, "forward-healthcheck-vip", o.config.Winkernel.ForwardHealthCheckVip, "If true forward service VIP for health check port")
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go
deleted file mode 100644
index 0f13dc72dc38..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go
+++ /dev/null
@@ -1,842 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package app does all of the work necessary to configure and run a
-// Kubernetes app process.
-package app
-
-import (
-	goflag "flag"
-	"fmt"
-	"net"
-	"net/http"
-	"os"
-	"strings"
-	"time"
-
-	utilnode "k8s.io/kubernetes/pkg/util/node"
-
-	"github.com/fsnotify/fsnotify"
-	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
-
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-	"k8s.io/apimachinery/pkg/selection"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/server/healthz"
-	"k8s.io/apiserver/pkg/server/mux"
-	"k8s.io/apiserver/pkg/server/routes"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/client-go/informers"
-	clientset "k8s.io/client-go/kubernetes"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	"k8s.io/client-go/tools/events"
-	cliflag "k8s.io/component-base/cli/flag"
-	componentbaseconfig "k8s.io/component-base/config"
-	"k8s.io/component-base/configz"
-	"k8s.io/component-base/logs"
-	logsapi "k8s.io/component-base/logs/api/v1"
-	metricsfeatures "k8s.io/component-base/metrics/features"
-	"k8s.io/component-base/metrics/legacyregistry"
-	"k8s.io/component-base/metrics/prometheus/slis"
-	"k8s.io/component-base/version"
-	"k8s.io/component-base/version/verflag"
-	"k8s.io/klog/v2"
-	"k8s.io/kube-proxy/config/v1alpha1"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/cluster/ports"
-	"k8s.io/kubernetes/pkg/kubelet/qos"
-	"k8s.io/kubernetes/pkg/proxy"
-	"k8s.io/kubernetes/pkg/proxy/apis"
-	kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
-	proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
-	kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"
-	"k8s.io/kubernetes/pkg/proxy/apis/config/validation"
-	"k8s.io/kubernetes/pkg/proxy/config"
-	"k8s.io/kubernetes/pkg/proxy/healthcheck"
-	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
-	"k8s.io/kubernetes/pkg/util/filesystem"
-	utilflag "k8s.io/kubernetes/pkg/util/flag"
-	utilipset "k8s.io/kubernetes/pkg/util/ipset"
-	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
-	utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
-	"k8s.io/kubernetes/pkg/util/oom"
-	"k8s.io/utils/exec"
-	netutils "k8s.io/utils/net"
-	"k8s.io/utils/pointer"
-)
-
-func init() {
-	utilruntime.Must(metricsfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate))
-	logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)
-}
-
-// proxyRun defines the interface to run a specified ProxyServer
-type proxyRun interface {
-	Run() error
-}
-
-// Options contains everything necessary to create and run a proxy server.
-type Options struct {
-	// ConfigFile is the location of the proxy server's configuration file.
-	ConfigFile string
-	// WriteConfigTo is the path where the default configuration will be written.
-	WriteConfigTo string
-	// CleanupAndExit, when true, makes the proxy server clean up iptables and ipvs rules, then exit.
-	CleanupAndExit bool
-	// WindowsService should be set to true if kube-proxy is running as a service on Windows.
-	// Its corresponding flag only gets registered in Windows builds
-	WindowsService bool
-	// config is the proxy server's configuration object.
-	config *kubeproxyconfig.KubeProxyConfiguration
-	// watcher is used to watch on the update change of ConfigFile
-	watcher filesystem.FSWatcher
-	// proxyServer is the interface to run the proxy server
-	proxyServer proxyRun
-	// errCh is the channel that errors will be sent
-	errCh chan error
-
-	// The fields below here are placeholders for flags that can't be directly mapped into
-	// config.KubeProxyConfiguration.
-	//
-	// TODO remove these fields once the deprecated flags are removed.
-
-	// master is used to override the kubeconfig's URL to the apiserver.
-	master string
-	// healthzPort is the port to be used by the healthz server.
-	healthzPort int32
-	// metricsPort is the port to be used by the metrics server.
-	metricsPort int32
-
-	// hostnameOverride, if set from the command line flag, takes precedence over the `HostnameOverride` value from the config file
-	hostnameOverride string
-}
-
-// AddFlags adds flags to fs and binds them to options.
-func (o *Options) AddFlags(fs *pflag.FlagSet) {
-	o.addOSFlags(fs)
-
-	fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.")
-	fs.StringVar(&o.WriteConfigTo, "write-config-to", o.WriteConfigTo, "If set, write the default configuration values to this file and exit.")
-	fs.StringVar(&o.config.ClientConnection.Kubeconfig, "kubeconfig", o.config.ClientConnection.Kubeconfig, "Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).")
-	fs.StringVar(&o.config.ClusterCIDR, "cluster-cidr", o.config.ClusterCIDR, "The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead. "+
-		"For dual-stack clusters, a comma-separated list is accepted with at least one CIDR per IP family (IPv4 and IPv6). "+
-		"This parameter is ignored if a config file is specified by --config.")
-	fs.StringVar(&o.config.ClientConnection.ContentType, "kube-api-content-type", o.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
-	fs.StringVar(&o.master, "master", o.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
-	fs.StringVar(&o.hostnameOverride, "hostname-override", o.hostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
-	fs.StringVar(&o.config.IPVS.Scheduler, "ipvs-scheduler", o.config.IPVS.Scheduler, "The ipvs scheduler type when proxy mode is ipvs")
-	fs.StringVar(&o.config.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.config.ShowHiddenMetricsForVersion,
-		"The previous version for which you want to show hidden metrics. "+
-			"Only the previous minor version is meaningful, other values will not be allowed. "+
-			"The format is <major>.<minor>, e.g.: '1.16'. "+
-			"The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+
-			"rather than being surprised when they are permanently removed in the release after that. "+
-			"This parameter is ignored if a config file is specified by --config.")
-
-	fs.StringSliceVar(&o.config.IPVS.ExcludeCIDRs, "ipvs-exclude-cidrs", o.config.IPVS.ExcludeCIDRs, "A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules.")
-	fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses,
-		"A string slice of values which specify the addresses to use for NodePorts. Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. This parameter is ignored if a config file is specified by --config.")
-
-	fs.BoolVar(&o.CleanupAndExit, "cleanup", o.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.")
-
-	fs.Var(&utilflag.IPVar{Val: &o.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces). This parameter is ignored if a config file is specified by --config.")
-	fs.Var(&utilflag.IPPortVar{Val: &o.config.HealthzBindAddress}, "healthz-bind-address", "The IP address with port for the health check server to serve on (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.")
-	fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.")
-	fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit")
-	fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.")
-	fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'."+
-		"This parameter is ignored if a config file is specified by --config.")
-	fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
-		"Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+
-		"This parameter is ignored if a config file is specified by --config.")
-
-	fs.Int32Var(&o.healthzPort, "healthz-port", o.healthzPort, "The port to bind the health check server. Use 0 to disable.")
-	fs.MarkDeprecated("healthz-port", "This flag is deprecated and will be removed in a future release. Please use --healthz-bind-address instead.")
-	fs.Int32Var(&o.metricsPort, "metrics-port", o.metricsPort, "The port to bind the metrics server. Use 0 to disable.")
-	fs.MarkDeprecated("metrics-port", "This flag is deprecated and will be removed in a future release. Please use --metrics-bind-address instead.")
-	fs.Int32Var(o.config.OOMScoreAdj, "oom-score-adj", pointer.Int32Deref(o.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. This parameter is ignored if a config file is specified by --config.")
-	fs.Int32Var(o.config.IPTables.MasqueradeBit, "iptables-masquerade-bit", pointer.Int32Deref(o.config.IPTables.MasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].")
-	fs.Int32Var(o.config.Conntrack.MaxPerCore, "conntrack-max-per-core", *o.config.Conntrack.MaxPerCore,
-		"Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).")
-	fs.Int32Var(o.config.Conntrack.Min, "conntrack-min", *o.config.Conntrack.Min,
-		"Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).")
-	fs.Int32Var(&o.config.ClientConnection.Burst, "kube-api-burst", o.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver")
-
-	fs.DurationVar(&o.config.IPTables.SyncPeriod.Duration, "iptables-sync-period", o.config.IPTables.SyncPeriod.Duration, "The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
-	fs.DurationVar(&o.config.IPTables.MinSyncPeriod.Duration, "iptables-min-sync-period", o.config.IPTables.MinSyncPeriod.Duration, "The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').")
-	fs.DurationVar(&o.config.IPVS.SyncPeriod.Duration, "ipvs-sync-period", o.config.IPVS.SyncPeriod.Duration, "The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
-	fs.DurationVar(&o.config.IPVS.MinSyncPeriod.Duration, "ipvs-min-sync-period", o.config.IPVS.MinSyncPeriod.Duration, "The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').")
-	fs.DurationVar(&o.config.IPVS.TCPTimeout.Duration, "ipvs-tcp-timeout", o.config.IPVS.TCPTimeout.Duration, "The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').")
-	fs.DurationVar(&o.config.IPVS.TCPFinTimeout.Duration, "ipvs-tcpfin-timeout", o.config.IPVS.TCPFinTimeout.Duration, "The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').")
-	fs.DurationVar(&o.config.IPVS.UDPTimeout.Duration, "ipvs-udp-timeout", o.config.IPVS.UDPTimeout.Duration, "The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').")
-	fs.DurationVar(&o.config.Conntrack.TCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", o.config.Conntrack.TCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
-	fs.DurationVar(
-		&o.config.Conntrack.TCPCloseWaitTimeout.Duration, "conntrack-tcp-timeout-close-wait",
-		o.config.Conntrack.TCPCloseWaitTimeout.Duration,
-		"NAT timeout for TCP connections in the CLOSE_WAIT state")
-	fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
-
-	fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2")
-	fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed)")
-	fs.BoolVar(o.config.IPTables.LocalhostNodePorts, "iptables-localhost-nodeports", pointer.BoolDeref(o.config.IPTables.LocalhostNodePorts, true), "If false Kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost, This only applies to iptables mode and ipv4.")
-	fs.BoolVar(&o.config.EnableProfiling, "profiling", o.config.EnableProfiling, "If true enables profiling via web interface on /debug/pprof handler. This parameter is ignored if a config file is specified by --config.")
-
-	fs.Float32Var(&o.config.ClientConnection.QPS, "kube-api-qps", o.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver")
-	fs.Var(&o.config.DetectLocalMode, "detect-local-mode", "Mode to use to detect local traffic. This parameter is ignored if a config file is specified by --config.")
-	fs.StringVar(&o.config.DetectLocal.BridgeInterface, "pod-bridge-interface", o.config.DetectLocal.BridgeInterface, "A bridge interface name in the cluster. Kube-proxy considers traffic as local if originating from an interface which matches the value. This argument should be set if DetectLocalMode is set to BridgeInterface.")
-	fs.StringVar(&o.config.DetectLocal.InterfaceNamePrefix, "pod-interface-name-prefix", o.config.DetectLocal.InterfaceNamePrefix, "An interface prefix in the cluster. Kube-proxy considers traffic as local if originating from interfaces that match the given prefix. This argument should be set if DetectLocalMode is set to InterfaceNamePrefix.")
-}
-
-// NewOptions returns initialized Options
-func NewOptions() *Options {
-	return &Options{
-		config:      new(kubeproxyconfig.KubeProxyConfiguration),
-		healthzPort: ports.ProxyHealthzPort,
-		metricsPort: ports.ProxyStatusPort,
-		errCh:       make(chan error),
-	}
-}
-
-// Complete completes all the required options.
-func (o *Options) Complete() error {
-	if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 {
-		klog.InfoS("Warning, all flags other than --config, --write-config-to, and --cleanup are deprecated, please begin using a config file ASAP")
-		o.config.HealthzBindAddress = addressFromDeprecatedFlags(o.config.HealthzBindAddress, o.healthzPort)
-		o.config.MetricsBindAddress = addressFromDeprecatedFlags(o.config.MetricsBindAddress, o.metricsPort)
-	}
-
-	// Load the config file here in Complete, so that Validate validates the fully-resolved config.
-	if len(o.ConfigFile) > 0 {
-		c, err := o.loadConfigFromFile(o.ConfigFile)
-		if err != nil {
-			return err
-		}
-		o.config = c
-
-		if err := o.initWatcher(); err != nil {
-			return err
-		}
-	}
-
-	if err := o.processHostnameOverrideFlag(); err != nil {
-		return err
-	}
-
-	return utilfeature.DefaultMutableFeatureGate.SetFromMap(o.config.FeatureGates)
-}
-
-// Creates a new filesystem watcher and adds watches for the config file.
-func (o *Options) initWatcher() error {
-	fswatcher := filesystem.NewFsnotifyWatcher()
-	err := fswatcher.Init(o.eventHandler, o.errorHandler)
-	if err != nil {
-		return err
-	}
-	err = fswatcher.AddWatch(o.ConfigFile)
-	if err != nil {
-		return err
-	}
-	o.watcher = fswatcher
-	return nil
-}
-
-func (o *Options) eventHandler(ent fsnotify.Event) {
-	if ent.Has(fsnotify.Write) || ent.Has(fsnotify.Rename) {
-		// error out when ConfigFile is updated
-		o.errCh <- fmt.Errorf("content of the proxy server's configuration file was updated")
-		return
-	}
-	o.errCh <- nil
-}
-
-func (o *Options) errorHandler(err error) {
-	o.errCh <- err
-}
-
-// processHostnameOverrideFlag processes hostname-override flag
-func (o *Options) processHostnameOverrideFlag() error {
-	// Check if hostname-override flag is set and use value since configFile always overrides
-	if len(o.hostnameOverride) > 0 {
-		hostName := strings.TrimSpace(o.hostnameOverride)
-		if len(hostName) == 0 {
-			return fmt.Errorf("empty hostname-override is invalid")
-		}
-		o.config.HostnameOverride = strings.ToLower(hostName)
-	}
-
-	return nil
-}
-
-// Validate validates all the required options.
-func (o *Options) Validate() error {
-	if errs := validation.Validate(o.config); len(errs) != 0 {
-		return errs.ToAggregate()
-	}
-
-	return nil
-}
-
-// Run runs the specified ProxyServer.
-func (o *Options) Run() error {
-	defer close(o.errCh)
-	if len(o.WriteConfigTo) > 0 {
-		return o.writeConfigFile()
-	}
-
-	if o.CleanupAndExit {
-		return cleanupAndExit()
-	}
-
-	proxyServer, err := NewProxyServer(o)
-	if err != nil {
-		return err
-	}
-
-	o.proxyServer = proxyServer
-	return o.runLoop()
-}
-
-// runLoop will watch on the update change of the proxy server's configuration file.
-// Return an error when updated
-func (o *Options) runLoop() error {
-	if o.watcher != nil {
-		o.watcher.Run()
-	}
-
-	// run the proxy in goroutine
-	go func() {
-		err := o.proxyServer.Run()
-		o.errCh <- err
-	}()
-
-	for {
-		err := <-o.errCh
-		if err != nil {
-			return err
-		}
-	}
-}
-
-func (o *Options) writeConfigFile() (err error) {
-	const mediaType = runtime.ContentTypeYAML
-	info, ok := runtime.SerializerInfoForMediaType(proxyconfigscheme.Codecs.SupportedMediaTypes(), mediaType)
-	if !ok {
-		return fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType)
-	}
-
-	encoder := proxyconfigscheme.Codecs.EncoderForVersion(info.Serializer, v1alpha1.SchemeGroupVersion)
-
-	configFile, err := os.Create(o.WriteConfigTo)
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		ferr := configFile.Close()
-		if ferr != nil && err == nil {
-			err = ferr
-		}
-	}()
-
-	if err = encoder.Encode(o.config, configFile); err != nil {
-		return err
-	}
-
-	klog.InfoS("Wrote configuration", "file", o.WriteConfigTo)
-
-	return nil
-}
-
-// addressFromDeprecatedFlags returns server address from flags
-// passed on the command line based on the following rules:
-// 1. If port is 0, disable the server (e.g. set address to empty).
-// 2. Otherwise, set the port portion of the config accordingly.
-func addressFromDeprecatedFlags(addr string, port int32) string {
-	if port == 0 {
-		return ""
-	}
-	return proxyutil.AppendPortIfNeeded(addr, port)
-}
-
-// newLenientSchemeAndCodecs returns a scheme that has only v1alpha1 registered into
-// it and a CodecFactory with strict decoding disabled.
-func newLenientSchemeAndCodecs() (*runtime.Scheme, *serializer.CodecFactory, error) {
-	lenientScheme := runtime.NewScheme()
-	if err := kubeproxyconfig.AddToScheme(lenientScheme); err != nil {
-		return nil, nil, fmt.Errorf("failed to add kube-proxy config API to lenient scheme: %v", err)
-	}
-	if err := kubeproxyconfigv1alpha1.AddToScheme(lenientScheme); err != nil {
-		return nil, nil, fmt.Errorf("failed to add kube-proxy config v1alpha1 API to lenient scheme: %v", err)
-	}
-	lenientCodecs := serializer.NewCodecFactory(lenientScheme, serializer.DisableStrict)
-	return lenientScheme, &lenientCodecs, nil
-}
-
-// loadConfigFromFile loads the contents of file and decodes it as a
-// KubeProxyConfiguration object.
-func (o *Options) loadConfigFromFile(file string) (*kubeproxyconfig.KubeProxyConfiguration, error) {
-	data, err := os.ReadFile(file)
-	if err != nil {
-		return nil, err
-	}
-
-	return o.loadConfig(data)
-}
-
-// loadConfig decodes a serialized KubeProxyConfiguration to the internal type.
-func (o *Options) loadConfig(data []byte) (*kubeproxyconfig.KubeProxyConfiguration, error) {
-
-	configObj, gvk, err := proxyconfigscheme.Codecs.UniversalDecoder().Decode(data, nil, nil)
-	if err != nil {
-		// Try strict decoding first. If that fails decode with a lenient
-		// decoder, which has only v1alpha1 registered, and log a warning.
-		// The lenient path is to be dropped when support for v1alpha1 is dropped.
-		if !runtime.IsStrictDecodingError(err) {
-			return nil, fmt.Errorf("failed to decode: %w", err)
-		}
-
-		_, lenientCodecs, lenientErr := newLenientSchemeAndCodecs()
-		if lenientErr != nil {
-			return nil, lenientErr
-		}
-
-		configObj, gvk, lenientErr = lenientCodecs.UniversalDecoder().Decode(data, nil, nil)
-		if lenientErr != nil {
-			// Lenient decoding failed with the current version, return the
-			// original strict error.
-			return nil, fmt.Errorf("failed lenient decoding: %v", err)
-		}
-
-		// Continue with the v1alpha1 object that was decoded leniently, but emit a warning.
-		klog.InfoS("Using lenient decoding as strict decoding failed", "err", err)
-	}
-
-	proxyConfig, ok := configObj.(*kubeproxyconfig.KubeProxyConfiguration)
-	if !ok {
-		return nil, fmt.Errorf("got unexpected config type: %v", gvk)
-	}
-	return proxyConfig, nil
-}
-
-// ApplyDefaults applies the default values to Options.
-func (o *Options) ApplyDefaults(in *kubeproxyconfig.KubeProxyConfiguration) (*kubeproxyconfig.KubeProxyConfiguration, error) {
-	external, err := proxyconfigscheme.Scheme.ConvertToVersion(in, v1alpha1.SchemeGroupVersion)
-	if err != nil {
-		return nil, err
-	}
-
-	proxyconfigscheme.Scheme.Default(external)
-
-	internal, err := proxyconfigscheme.Scheme.ConvertToVersion(external, kubeproxyconfig.SchemeGroupVersion)
-	if err != nil {
-		return nil, err
-	}
-
-	out := internal.(*kubeproxyconfig.KubeProxyConfiguration)
-
-	return out, nil
-}
-
-// NewProxyCommand creates a *cobra.Command object with default parameters
-func NewProxyCommand() *cobra.Command {
-	opts := NewOptions()
-
-	cmd := &cobra.Command{
-		Use: "kube-proxy",
-		Long: `The Kubernetes network proxy runs on each node. This
-reflects services as defined in the Kubernetes API on each node and can do simple
-TCP, UDP, and SCTP stream forwarding or round robin TCP, UDP, and SCTP forwarding across a set of backends.
-Service cluster IPs and ports are currently found through Docker-links-compatible
-environment variables specifying ports opened by the service proxy. There is an optional
-addon that provides cluster DNS for these cluster IPs. The user must create a service
-with the apiserver API to configure the proxy.`,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			verflag.PrintAndExitIfRequested()
-			cliflag.PrintFlags(cmd.Flags())
-
-			if err := initForOS(opts.WindowsService); err != nil {
-				return fmt.Errorf("failed os init: %w", err)
-			}
-
-			if err := opts.Complete(); err != nil {
-				return fmt.Errorf("failed complete: %w", err)
-			}
-
-			if err := opts.Validate(); err != nil {
-				return fmt.Errorf("failed validate: %w", err)
-			}
-			// add feature enablement metrics
-			utilfeature.DefaultMutableFeatureGate.AddMetrics()
-			if err := opts.Run(); err != nil {
-				klog.ErrorS(err, "Error running ProxyServer")
-				return err
-			}
-
-			return nil
-		},
-		Args: func(cmd *cobra.Command, args []string) error {
-			for _, arg := range args {
-				if len(arg) > 0 {
-					return fmt.Errorf("%q does not take any arguments, got %q", cmd.CommandPath(), args)
-				}
-			}
-			return nil
-		},
-	}
-
-	var err error
-	opts.config, err = opts.ApplyDefaults(opts.config)
-	if err != nil {
-		klog.ErrorS(err, "Unable to create flag defaults")
-		// ACTION REQUIRED: Exit code changed from 255 to 1
-		os.Exit(1)
-	}
-
-	fs := cmd.Flags()
-	opts.AddFlags(fs)
-	fs.AddGoFlagSet(goflag.CommandLine) // for --boot-id-file and --machine-id-file
-
-	_ = cmd.MarkFlagFilename("config", "yaml", "yml", "json")
-
-	return cmd
-}
-
-// ProxyServer represents all the parameters required to start the Kubernetes proxy server. All
-// fields are required.
-type ProxyServer struct {
-	Client                 clientset.Interface
-	EventClient            v1core.EventsGetter
-	IptInterface           utiliptables.Interface
-	IpvsInterface          utilipvs.Interface
-	IpsetInterface         utilipset.Interface
-	execer                 exec.Interface
-	Proxier                proxy.Provider
-	Broadcaster            events.EventBroadcaster
-	Recorder               events.EventRecorder
-	ConntrackConfiguration kubeproxyconfig.KubeProxyConntrackConfiguration
-	Conntracker            Conntracker // if nil, ignored
-	ProxyMode              kubeproxyconfig.ProxyMode
-	NodeRef                *v1.ObjectReference
-	MetricsBindAddress     string
-	BindAddressHardFail    bool
-	EnableProfiling        bool
-	OOMScoreAdj            *int32
-	ConfigSyncPeriod       time.Duration
-	HealthzServer          healthcheck.ProxierHealthUpdater
-	localDetectorMode      kubeproxyconfig.LocalMode
-}
-
-// createClients creates a kube client and an event client from the given config and masterOverride.
-// TODO remove masterOverride when CLI flags are removed.
-func createClients(config componentbaseconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, v1core.EventsGetter, error) {
-	var kubeConfig *rest.Config
-	var err error
-
-	if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 {
-		klog.InfoS("Neither kubeconfig file nor master URL was specified, falling back to in-cluster config")
-		kubeConfig, err = rest.InClusterConfig()
-	} else {
-		// This creates a client, first loading any specified kubeconfig
-		// file, and then overriding the Master flag, if non-empty.
-		kubeConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-			&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
-			&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterOverride}}).ClientConfig()
-	}
-	if err != nil {
-		return nil, nil, err
-	}
-
-	kubeConfig.AcceptContentTypes = config.AcceptContentTypes
-	kubeConfig.ContentType = config.ContentType
-	kubeConfig.QPS = config.QPS
-	kubeConfig.Burst = int(config.Burst)
-
-	client, err := clientset.NewForConfig(kubeConfig)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	eventClient, err := clientset.NewForConfig(kubeConfig)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return client, eventClient.CoreV1(), nil
-}
-
-func serveHealthz(hz healthcheck.ProxierHealthUpdater, errCh chan error) {
-	if hz == nil {
-		return
-	}
-
-	fn := func() {
-		err := hz.Run()
-		if err != nil {
-			klog.ErrorS(err, "Healthz server failed")
-			if errCh != nil {
-				errCh <- fmt.Errorf("healthz server failed: %v", err)
-				// if in hardfail mode, never retry again
-				blockCh := make(chan error)
-				<-blockCh
-			}
-		} else {
-			klog.ErrorS(nil, "Healthz server returned without error")
-		}
-	}
-	go wait.Until(fn, 5*time.Second, wait.NeverStop)
-}
-
-func serveMetrics(bindAddress string, proxyMode kubeproxyconfig.ProxyMode, enableProfiling bool, errCh chan error) {
-	if len(bindAddress) == 0 {
-		return
-	}
-
-	proxyMux := mux.NewPathRecorderMux("kube-proxy")
-	healthz.InstallHandler(proxyMux)
-	if utilfeature.DefaultFeatureGate.Enabled(metricsfeatures.ComponentSLIs) {
-		slis.SLIMetricsWithReset{}.Install(proxyMux)
-	}
-	proxyMux.HandleFunc("/proxyMode", func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-		w.Header().Set("X-Content-Type-Options", "nosniff")
-		fmt.Fprintf(w, "%s", proxyMode)
-	})
-
-	proxyMux.Handle("/metrics", legacyregistry.Handler())
-
-	if enableProfiling {
-		routes.Profiling{}.Install(proxyMux)
-		routes.DebugFlags{}.Install(proxyMux, "v", routes.StringFlagPutHandler(logs.GlogSetter))
-	}
-
-	configz.InstallHandler(proxyMux)
-
-	fn := func() {
-		err := http.ListenAndServe(bindAddress, proxyMux)
-		if err != nil {
-			err = fmt.Errorf("starting metrics server failed: %v", err)
-			utilruntime.HandleError(err)
-			if errCh != nil {
-				errCh <- err
-				// if in hardfail mode, never retry again
-				blockCh := make(chan error)
-				<-blockCh
-			}
-		}
-	}
-	go wait.Until(fn, 5*time.Second, wait.NeverStop)
-}
-
-// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
-// TODO: At the moment, Run() cannot return a nil error, otherwise it's caller will never exit. Update callers of Run to handle nil errors.
-func (s *ProxyServer) Run() error {
-	// To help debugging, immediately log version
-	klog.InfoS("Version info", "version", version.Get())
-
-	klog.InfoS("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK"))
-
-	// TODO(vmarmol): Use container config for this.
-	var oomAdjuster *oom.OOMAdjuster
-	if s.OOMScoreAdj != nil {
-		oomAdjuster = oom.NewOOMAdjuster()
-		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.OOMScoreAdj)); err != nil {
-			klog.V(2).InfoS("Failed to apply OOMScore", "err", err)
-		}
-	}
-
-	if s.Broadcaster != nil && s.EventClient != nil {
-		stopCh := make(chan struct{})
-		s.Broadcaster.StartRecordingToSink(stopCh)
-	}
-
-	// TODO(thockin): make it possible for healthz and metrics to be on the same port.
-
-	var errCh chan error
-	if s.BindAddressHardFail {
-		errCh = make(chan error)
-	}
-
-	// Start up a healthz server if requested
-	serveHealthz(s.HealthzServer, errCh)
-
-	// Start up a metrics server if requested
-	serveMetrics(s.MetricsBindAddress, s.ProxyMode, s.EnableProfiling, errCh)
-
-	// Tune conntrack, if requested
-	// Conntracker is always nil for windows
-	if s.Conntracker != nil {
-		max, err := getConntrackMax(s.ConntrackConfiguration)
-		if err != nil {
-			return err
-		}
-		if max > 0 {
-			err := s.Conntracker.SetMax(max)
-			if err != nil {
-				if err != errReadOnlySysFS {
-					return err
-				}
-				// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
-				// the only remediation we know is to restart the docker daemon.
-				// Here we'll send an node event with specific reason and message, the
-				// administrator should decide whether and how to handle this issue,
-				// whether to drain the node and restart docker. Occurs in other container runtimes
-				// as well.
-				// TODO(random-liu): Remove this when the docker bug is fixed.
-				const message = "CRI error: /sys is read-only: " +
-					"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
-				s.Recorder.Eventf(s.NodeRef, nil, api.EventTypeWarning, err.Error(), "StartKubeProxy", message)
-			}
-		}
-
-		if s.ConntrackConfiguration.TCPEstablishedTimeout != nil && s.ConntrackConfiguration.TCPEstablishedTimeout.Duration > 0 {
-			timeout := int(s.ConntrackConfiguration.TCPEstablishedTimeout.Duration / time.Second)
-			if err := s.Conntracker.SetTCPEstablishedTimeout(timeout); err != nil {
-				return err
-			}
-		}
-
-		if s.ConntrackConfiguration.TCPCloseWaitTimeout != nil && s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration > 0 {
-			timeout := int(s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration / time.Second)
-			if err := s.Conntracker.SetTCPCloseWaitTimeout(timeout); err != nil {
-				return err
-			}
-		}
-	}
-
-	noProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)
-	if err != nil {
-		return err
-	}
-
-	noHeadlessEndpoints, err := labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil)
-	if err != nil {
-		return err
-	}
-
-	labelSelector := labels.NewSelector()
-	labelSelector = labelSelector.Add(*noProxyName, *noHeadlessEndpoints)
-
-	// Make informers that filter out objects that want a non-default service proxy.
-	informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod,
-		informers.WithTweakListOptions(func(options *metav1.ListOptions) {
-			options.LabelSelector = labelSelector.String()
-		}))
-
-	// Create configs (i.e. Watches for Services and EndpointSlices)
-	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
-	// only notify on changes, and the initial update (on process start) may be lost if no handlers
-	// are registered yet.
-	serviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.ConfigSyncPeriod)
-	serviceConfig.RegisterEventHandler(s.Proxier)
-	go serviceConfig.Run(wait.NeverStop)
-
-	endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.ConfigSyncPeriod)
-	endpointSliceConfig.RegisterEventHandler(s.Proxier)
-	go endpointSliceConfig.Run(wait.NeverStop)
-
-	// This has to start after the calls to NewServiceConfig because that
-	// function must configure its shared informer event handlers first.
-	informerFactory.Start(wait.NeverStop)
-
-	// Make an informer that selects for our nodename.
-	currentNodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod,
-		informers.WithTweakListOptions(func(options *metav1.ListOptions) {
-			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", s.NodeRef.Name).String()
-		}))
-	nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.ConfigSyncPeriod)
-	// https://issues.k8s.io/111321
-	if s.localDetectorMode == kubeproxyconfig.LocalModeNodeCIDR {
-		nodeConfig.RegisterEventHandler(&proxy.NodePodCIDRHandler{})
-	}
-	nodeConfig.RegisterEventHandler(s.Proxier)
-
-	go nodeConfig.Run(wait.NeverStop)
-
-	// This has to start after the calls to NewNodeConfig because that must
-	// configure the shared informer event handler first.
-	currentNodeInformerFactory.Start(wait.NeverStop)
-
-	// Birth Cry after the birth is successful
-	s.birthCry()
-
-	go s.Proxier.SyncLoop()
-
-	return <-errCh
-}
-
-func (s *ProxyServer) birthCry() {
-	s.Recorder.Eventf(s.NodeRef, nil, api.EventTypeNormal, "Starting", "StartKubeProxy", "")
-}
-
-func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (int, error) {
-	if config.MaxPerCore != nil && *config.MaxPerCore > 0 {
-		floor := 0
-		if config.Min != nil {
-			floor = int(*config.Min)
-		}
-		scaled := int(*config.MaxPerCore) * detectNumCPU()
-		if scaled > floor {
-			klog.V(3).InfoS("GetConntrackMax: using scaled conntrack-max-per-core")
-			return scaled, nil
-		}
-		klog.V(3).InfoS("GetConntrackMax: using conntrack-min")
-		return floor, nil
-	}
-	return 0, nil
-}
-
-// detectNodeIP returns the nodeIP used by the proxier
-// The order of precedence is:
-// 1. config.bindAddress if bindAddress is not 0.0.0.0 or ::
-// 2. the primary IP from the Node object, if set
-// 3. if no IP is found it defaults to 127.0.0.1 and IPv4
-func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.IP {
-	nodeIP := netutils.ParseIPSloppy(bindAddress)
-	if nodeIP.IsUnspecified() {
-		nodeIP = utilnode.GetNodeIP(client, hostname)
-	}
-	if nodeIP == nil {
-		klog.InfoS("Can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag")
-		nodeIP = netutils.ParseIPSloppy("127.0.0.1")
-	}
-	return nodeIP
-}
-
-// nodeIPTuple takes an addresses and return a tuple (ipv4,ipv6)
-// The returned tuple is guaranteed to have the order (ipv4,ipv6). The address NOT of the passed address
-// will have "any" address (0.0.0.0 or ::) inserted.
-func nodeIPTuple(bindAddress string) [2]net.IP {
-	nodes := [2]net.IP{net.IPv4zero, net.IPv6zero}
-
-	adr := netutils.ParseIPSloppy(bindAddress)
-	if netutils.IsIPv6(adr) {
-		nodes[1] = adr
-	} else {
-		nodes[0] = adr
-	}
-
-	return nodes
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go
deleted file mode 100644
index 56ad20f9567e..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go
+++ /dev/null
@@ -1,581 +0,0 @@
-//go:build !windows
-// +build !windows
-
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package app does all of the work necessary to configure and run a
-// Kubernetes app process.
-package app
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	goruntime "runtime"
-	"strings"
-	"time"
-
-	"github.com/google/cadvisor/machine"
-	"github.com/google/cadvisor/utils/sysfs"
-	"k8s.io/apimachinery/pkg/watch"
-
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/tools/events"
-
-	"k8s.io/apimachinery/pkg/fields"
-
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	clientset "k8s.io/client-go/kubernetes"
-	toolswatch "k8s.io/client-go/tools/watch"
-	"k8s.io/component-base/configz"
-	"k8s.io/component-base/metrics"
-	nodeutil "k8s.io/component-helpers/node/util"
-	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
-	"k8s.io/kubernetes/pkg/proxy"
-	proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config"
-	"k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
-	"k8s.io/kubernetes/pkg/proxy/healthcheck"
-	"k8s.io/kubernetes/pkg/proxy/iptables"
-	"k8s.io/kubernetes/pkg/proxy/ipvs"
-	proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
-	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
-	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
-	utilipset "k8s.io/kubernetes/pkg/util/ipset"
-	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
-	utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
-	"k8s.io/utils/exec"
-	netutils "k8s.io/utils/net"
-
-	"k8s.io/klog/v2"
-)
-
-// timeoutForNodePodCIDR is the time to wait for allocators to assign a PodCIDR to the
-// node after it is registered.
-var timeoutForNodePodCIDR = 5 * time.Minute
-
-// NewProxyServer returns a new ProxyServer.
-func NewProxyServer(o *Options) (*ProxyServer, error) {
-	return newProxyServer(o.config, o.master)
-}
-
-func newProxyServer(
-	config *proxyconfigapi.KubeProxyConfiguration,
-	master string) (*ProxyServer, error) {
-
-	if config == nil {
-		return nil, errors.New("config is required")
-	}
-
-	if c, err := configz.New(proxyconfigapi.GroupName); err == nil {
-		c.Set(config)
-	} else {
-		return nil, fmt.Errorf("unable to register configz: %s", err)
-	}
-
-	var ipvsInterface utilipvs.Interface
-	var ipsetInterface utilipset.Interface
-
-	if len(config.ShowHiddenMetricsForVersion) > 0 {
-		metrics.SetShowHidden()
-	}
-
-	hostname, err := nodeutil.GetHostname(config.HostnameOverride)
-	if err != nil {
-		return nil, err
-	}
-
-	client, eventClient, err := createClients(config.ClientConnection, master)
-	if err != nil {
-		return nil, err
-	}
-
-	nodeIP := detectNodeIP(client, hostname, config.BindAddress)
-	klog.InfoS("Detected node IP", "address", nodeIP.String())
-
-	// Create event recorder
-	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "kube-proxy")
-
-	nodeRef := &v1.ObjectReference{
-		Kind:      "Node",
-		Name:      hostname,
-		UID:       types.UID(hostname),
-		Namespace: "",
-	}
-
-	var healthzServer healthcheck.ProxierHealthUpdater
-	if len(config.HealthzBindAddress) > 0 {
-		healthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef)
-	}
-
-	var proxier proxy.Provider
-	var detectLocalMode proxyconfigapi.LocalMode
-
-	proxyMode := getProxyMode(config.Mode)
-	detectLocalMode, err = getDetectLocalMode(config)
-	if err != nil {
-		return nil, fmt.Errorf("cannot determine detect-local-mode: %v", err)
-	}
-
-	var nodeInfo *v1.Node
-	if detectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
-		klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", hostname)
-		nodeInfo, err = waitForPodCIDR(client, hostname)
-		if err != nil {
-			return nil, err
-		}
-		klog.InfoS("NodeInfo", "podCIDR", nodeInfo.Spec.PodCIDR, "podCIDRs", nodeInfo.Spec.PodCIDRs)
-	}
-
-	klog.V(2).InfoS("DetectLocalMode", "localMode", string(detectLocalMode))
-
-	primaryFamily := v1.IPv4Protocol
-	primaryProtocol := utiliptables.ProtocolIPv4
-	if netutils.IsIPv6(nodeIP) {
-		primaryFamily = v1.IPv6Protocol
-		primaryProtocol = utiliptables.ProtocolIPv6
-	}
-	execer := exec.New()
-	iptInterface := utiliptables.New(execer, primaryProtocol)
-
-	var ipt [2]utiliptables.Interface
-	dualStack := true // While we assume that node supports, we do further checks below
-
-	// Create iptables handlers for both families, one is already created
-	// Always ordered as IPv4, IPv6
-	if primaryProtocol == utiliptables.ProtocolIPv4 {
-		ipt[0] = iptInterface
-		ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
-	} else {
-		ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
-		ipt[1] = iptInterface
-	}
-
-	nodePortAddresses := config.NodePortAddresses
-
-	if !ipt[0].Present() {
-		return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
-	} else if !ipt[1].Present() {
-		klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
-		dualStack = false
-
-		// Validate NodePortAddresses is single-stack
-		npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
-		secondaryFamily := proxyutil.OtherIPFamily(primaryFamily)
-		badAddrs := npaByFamily[secondaryFamily]
-		if len(badAddrs) > 0 {
-			klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
-			nodePortAddresses = npaByFamily[primaryFamily]
-		}
-	}
-
-	if proxyMode == proxyconfigapi.ProxyModeIPTables {
-		klog.InfoS("Using iptables Proxier")
-		if config.IPTables.MasqueradeBit == nil {
-			// MasqueradeBit must be specified or defaulted.
-			return nil, fmt.Errorf("unable to read IPTables MasqueradeBit from config")
-		}
-
-		if dualStack {
-			klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
-			klog.InfoS("Creating dualStackProxier for iptables")
-			// Always ordered to match []ipt
-			var localDetectors [2]proxyutiliptables.LocalTrafficDetector
-			localDetectors, err = getDualStackLocalDetectorTuple(detectLocalMode, config, ipt, nodeInfo)
-			if err != nil {
-				return nil, fmt.Errorf("unable to create proxier: %v", err)
-			}
-
-			// TODO this has side effects that should only happen when Run() is invoked.
-			proxier, err = iptables.NewDualStackProxier(
-				ipt,
-				utilsysctl.New(),
-				execer,
-				config.IPTables.SyncPeriod.Duration,
-				config.IPTables.MinSyncPeriod.Duration,
-				config.IPTables.MasqueradeAll,
-				*config.IPTables.LocalhostNodePorts,
-				int(*config.IPTables.MasqueradeBit),
-				localDetectors,
-				hostname,
-				nodeIPTuple(config.BindAddress),
-				recorder,
-				healthzServer,
-				nodePortAddresses,
-			)
-		} else {
-			// Create a single-stack proxier if and only if the node does not support dual-stack (i.e, no iptables support).
-			var localDetector proxyutiliptables.LocalTrafficDetector
-			localDetector, err = getLocalDetector(detectLocalMode, config, iptInterface, nodeInfo)
-			if err != nil {
-				return nil, fmt.Errorf("unable to create proxier: %v", err)
-			}
-
-			// TODO this has side effects that should only happen when Run() is invoked.
- proxier, err = iptables.NewProxier( - primaryFamily, - iptInterface, - utilsysctl.New(), - execer, - config.IPTables.SyncPeriod.Duration, - config.IPTables.MinSyncPeriod.Duration, - config.IPTables.MasqueradeAll, - *config.IPTables.LocalhostNodePorts, - int(*config.IPTables.MasqueradeBit), - localDetector, - hostname, - nodeIP, - recorder, - healthzServer, - nodePortAddresses, - ) - } - - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - proxymetrics.RegisterMetrics() - } else if proxyMode == proxyconfigapi.ProxyModeIPVS { - kernelHandler := ipvs.NewLinuxKernelHandler() - ipsetInterface = utilipset.New(execer) - ipvsInterface = utilipvs.New() - if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil { - return nil, fmt.Errorf("can't use the IPVS proxier: %v", err) - } - - klog.InfoS("Using ipvs Proxier") - if dualStack { - klog.InfoS("Creating dualStackProxier for ipvs") - - nodeIPs := nodeIPTuple(config.BindAddress) - - // Always ordered to match []ipt - var localDetectors [2]proxyutiliptables.LocalTrafficDetector - localDetectors, err = getDualStackLocalDetectorTuple(detectLocalMode, config, ipt, nodeInfo) - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - - proxier, err = ipvs.NewDualStackProxier( - ipt, - ipvsInterface, - ipsetInterface, - utilsysctl.New(), - execer, - config.IPVS.SyncPeriod.Duration, - config.IPVS.MinSyncPeriod.Duration, - config.IPVS.ExcludeCIDRs, - config.IPVS.StrictARP, - config.IPVS.TCPTimeout.Duration, - config.IPVS.TCPFinTimeout.Duration, - config.IPVS.UDPTimeout.Duration, - config.IPTables.MasqueradeAll, - int(*config.IPTables.MasqueradeBit), - localDetectors, - hostname, - nodeIPs, - recorder, - healthzServer, - config.IPVS.Scheduler, - nodePortAddresses, - kernelHandler, - ) - } else { - var localDetector proxyutiliptables.LocalTrafficDetector - localDetector, err = getLocalDetector(detectLocalMode, config, iptInterface, nodeInfo) - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - - proxier, err = ipvs.NewProxier( - primaryFamily, - iptInterface, - ipvsInterface, - ipsetInterface, - utilsysctl.New(), - execer, - config.IPVS.SyncPeriod.Duration, - config.IPVS.MinSyncPeriod.Duration, - config.IPVS.ExcludeCIDRs, - config.IPVS.StrictARP, - config.IPVS.TCPTimeout.Duration, - config.IPVS.TCPFinTimeout.Duration, - config.IPVS.UDPTimeout.Duration, - config.IPTables.MasqueradeAll, - int(*config.IPTables.MasqueradeBit), - localDetector, - hostname, - nodeIP, - recorder, - healthzServer, - config.IPVS.Scheduler, - nodePortAddresses, - kernelHandler, - ) - } - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - proxymetrics.RegisterMetrics() - } - - return &ProxyServer{ - Client: client, - EventClient: eventClient, - IptInterface: iptInterface, - IpvsInterface: ipvsInterface, - IpsetInterface: ipsetInterface, - execer: execer, - Proxier: proxier, - Broadcaster: eventBroadcaster, - Recorder: recorder, - ConntrackConfiguration: config.Conntrack, - Conntracker: &realConntracker{}, - ProxyMode: proxyMode, - NodeRef: nodeRef, - MetricsBindAddress: config.MetricsBindAddress, - BindAddressHardFail: config.BindAddressHardFail, - EnableProfiling: config.EnableProfiling, - OOMScoreAdj: config.OOMScoreAdj, - ConfigSyncPeriod: config.ConfigSyncPeriod.Duration, - HealthzServer: healthzServer, - localDetectorMode: detectLocalMode, - }, nil -} - -func waitForPodCIDR(client clientset.Interface, nodeName 
string) (*v1.Node, error) { - // since allocators can assign the podCIDR after the node registers, we do a watch here to wait - // for podCIDR to be assigned, instead of assuming that the Get() on startup will have it. - ctx, cancelFunc := context.WithTimeout(context.TODO(), timeoutForNodePodCIDR) - defer cancelFunc() - - fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName).String() - lw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) { - options.FieldSelector = fieldSelector - return client.CoreV1().Nodes().List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { - options.FieldSelector = fieldSelector - return client.CoreV1().Nodes().Watch(ctx, options) - }, - } - condition := func(event watch.Event) (bool, error) { - // don't process delete events - if event.Type != watch.Modified && event.Type != watch.Added { - return false, nil - } - - n, ok := event.Object.(*v1.Node) - if !ok { - return false, fmt.Errorf("event object not of type Node") - } - // don't consider the node if is going to be deleted and keep waiting - if !n.DeletionTimestamp.IsZero() { - return false, nil - } - return n.Spec.PodCIDR != "" && len(n.Spec.PodCIDRs) > 0, nil - } - - evt, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition) - if err != nil { - return nil, fmt.Errorf("timeout waiting for PodCIDR allocation to configure detect-local-mode %v: %v", proxyconfigapi.LocalModeNodeCIDR, err) - } - if n, ok := evt.Object.(*v1.Node); ok { - return n, nil - } - return nil, fmt.Errorf("event object not of type node") -} - -func detectNumCPU() int { - // try get numCPU from /sys firstly due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225) - _, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs()) - if err != nil || numCPU < 1 { - return goruntime.NumCPU() - } - return numCPU -} - -func getDetectLocalMode(config *proxyconfigapi.KubeProxyConfiguration) (proxyconfigapi.LocalMode, error) { - mode := config.DetectLocalMode - switch mode { - case proxyconfigapi.LocalModeClusterCIDR, proxyconfigapi.LocalModeNodeCIDR, proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix: - return mode, nil - default: - if strings.TrimSpace(mode.String()) != "" { - return mode, fmt.Errorf("unknown detect-local-mode: %v", mode) - } - klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR)) - return proxyconfigapi.LocalModeClusterCIDR, nil - } -} - -func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt utiliptables.Interface, nodeInfo *v1.Node) (proxyutiliptables.LocalTrafficDetector, error) { - switch mode { - case proxyconfigapi.LocalModeClusterCIDR: - if len(strings.TrimSpace(config.ClusterCIDR)) == 0 { - klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined") - break - } - return proxyutiliptables.NewDetectLocalByCIDR(config.ClusterCIDR, ipt) - case proxyconfigapi.LocalModeNodeCIDR: - if len(strings.TrimSpace(nodeInfo.Spec.PodCIDR)) == 0 { - klog.InfoS("Detect-local-mode set to NodeCIDR, but no PodCIDR defined at node") - break - } - return proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDR, ipt) - case proxyconfigapi.LocalModeBridgeInterface: - if len(strings.TrimSpace(config.DetectLocal.BridgeInterface)) == 0 { - return nil, fmt.Errorf("Detect-local-mode set to BridgeInterface, but no bridge-interface-name %s 
is defined", config.DetectLocal.BridgeInterface) - } - return proxyutiliptables.NewDetectLocalByBridgeInterface(config.DetectLocal.BridgeInterface) - case proxyconfigapi.LocalModeInterfaceNamePrefix: - if len(strings.TrimSpace(config.DetectLocal.InterfaceNamePrefix)) == 0 { - return nil, fmt.Errorf("Detect-local-mode set to InterfaceNamePrefix, but no interface-prefix %s is defined", config.DetectLocal.InterfaceNamePrefix) - } - return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix) - } - klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode)) - return proxyutiliptables.NewNoOpLocalDetector(), nil -} - -func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxyconfigapi.KubeProxyConfiguration, ipt [2]utiliptables.Interface, nodeInfo *v1.Node) ([2]proxyutiliptables.LocalTrafficDetector, error) { - var err error - localDetectors := [2]proxyutiliptables.LocalTrafficDetector{proxyutiliptables.NewNoOpLocalDetector(), proxyutiliptables.NewNoOpLocalDetector()} - switch mode { - case proxyconfigapi.LocalModeClusterCIDR: - if len(strings.TrimSpace(config.ClusterCIDR)) == 0 { - klog.InfoS("Detect-local-mode set to ClusterCIDR, but no cluster CIDR defined") - break - } - - clusterCIDRs := cidrTuple(config.ClusterCIDR) - - if len(strings.TrimSpace(clusterCIDRs[0])) == 0 { - klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv4 cluster CIDR defined, defaulting to no-op detect-local for IPv4") - } else { - localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[0], ipt[0]) - if err != nil { // don't loose the original error - return localDetectors, err - } - } - - if len(strings.TrimSpace(clusterCIDRs[1])) == 0 { - klog.InfoS("Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6") - } else { - localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(clusterCIDRs[1], ipt[1]) - } - return localDetectors, err - case proxyconfigapi.LocalModeNodeCIDR: - if nodeInfo == nil || len(strings.TrimSpace(nodeInfo.Spec.PodCIDR)) == 0 { - klog.InfoS("No node info available to configure detect-local-mode NodeCIDR") - break - } - // localDetectors, like ipt, need to be of the order [IPv4, IPv6], but PodCIDRs is setup so that PodCIDRs[0] == PodCIDR. 
- // so have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1] - if netutils.IsIPv6CIDRString(nodeInfo.Spec.PodCIDR) { - localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDR, ipt[1]) - if err != nil { - return localDetectors, err - } - if len(nodeInfo.Spec.PodCIDRs) > 1 { - localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDRs[1], ipt[0]) - } - } else { - localDetectors[0], err = proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDR, ipt[0]) - if err != nil { - return localDetectors, err - } - if len(nodeInfo.Spec.PodCIDRs) > 1 { - localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDRs[1], ipt[1]) - } - } - return localDetectors, err - case proxyconfigapi.LocalModeBridgeInterface, proxyconfigapi.LocalModeInterfaceNamePrefix: - localDetector, err := getLocalDetector(mode, config, ipt[0], nodeInfo) - if err == nil { - localDetectors[0] = localDetector - localDetectors[1] = localDetector - } - return localDetectors, err - default: - klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode) - } - klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode)) - return localDetectors, nil -} - -// cidrTuple takes a comma separated list of CIDRs and return a tuple (ipv4cidr,ipv6cidr) -// The returned tuple is guaranteed to have the order (ipv4,ipv6) and if no cidr from a family is found an -// empty string "" is inserted. -func cidrTuple(cidrList string) [2]string { - cidrs := [2]string{"", ""} - foundIPv4 := false - foundIPv6 := false - - for _, cidr := range strings.Split(cidrList, ",") { - if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 { - cidrs[1] = cidr - foundIPv6 = true - } else if !foundIPv4 { - cidrs[0] = cidr - foundIPv4 = true - } - if foundIPv6 && foundIPv4 { - break - } - } - - return cidrs -} - -func getProxyMode(proxyMode proxyconfigapi.ProxyMode) proxyconfigapi.ProxyMode { - if proxyMode == "" { - klog.InfoS("Using iptables proxy") - return proxyconfigapi.ProxyModeIPTables - } else { - return proxyMode - } -} - -// cleanupAndExit remove iptables rules and ipset/ipvs rules -func cleanupAndExit() error { - execer := exec.New() - - // cleanup IPv6 and IPv4 iptables rules, regardless of current configuration - ipts := []utiliptables.Interface{ - utiliptables.New(execer, utiliptables.ProtocolIPv4), - utiliptables.New(execer, utiliptables.ProtocolIPv6), - } - - ipsetInterface := utilipset.New(execer) - ipvsInterface := utilipvs.New() - - var encounteredError bool - for _, ipt := range ipts { - encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError - encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError - } - if encounteredError { - return errors.New("encountered an error while tearing down rules") - } - - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go deleted file mode 100644 index 6336fa72cc00..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go +++ /dev/null @@ -1,172 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package app does all of the work necessary to configure and run a -// Kubernetes app process. -package app - -import ( - "errors" - "fmt" - "net" - goruntime "runtime" - "strconv" - - // Enable pprof HTTP handlers. - _ "net/http/pprof" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/events" - "k8s.io/component-base/configz" - "k8s.io/component-base/metrics" - nodeutil "k8s.io/component-helpers/node/util" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/proxy" - proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config" - proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme" - "k8s.io/kubernetes/pkg/proxy/healthcheck" - "k8s.io/kubernetes/pkg/proxy/winkernel" -) - -// NewProxyServer returns a new ProxyServer. -func NewProxyServer(o *Options) (*ProxyServer, error) { - return newProxyServer(o.config, o.master) -} - -func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string) (*ProxyServer, error) { - if config == nil { - return nil, errors.New("config is required") - } - - if c, err := configz.New(proxyconfigapi.GroupName); err == nil { - c.Set(config) - } else { - return nil, fmt.Errorf("unable to register configz: %s", err) - } - - if len(config.ShowHiddenMetricsForVersion) > 0 { - metrics.SetShowHidden() - } - - client, eventClient, err := createClients(config.ClientConnection, master) - if err != nil { - return nil, err - } - - // Create event recorder - hostname, err := nodeutil.GetHostname(config.HostnameOverride) - if err != nil { - return nil, err - } - nodeIP := detectNodeIP(client, hostname, config.BindAddress) - klog.InfoS("Detected node IP", "IP", nodeIP.String()) - - eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) - recorder := eventBroadcaster.NewRecorder(proxyconfigscheme.Scheme, "kube-proxy") - - nodeRef := &v1.ObjectReference{ - Kind: "Node", - Name: hostname, - UID: types.UID(hostname), - Namespace: "", - } - - var healthzServer healthcheck.ProxierHealthUpdater - var healthzPort int - if len(config.HealthzBindAddress) > 0 { - healthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef) - _, port, _ := net.SplitHostPort(config.HealthzBindAddress) - healthzPort, _ = strconv.Atoi(port) - } - - // Check if Kernel Space can be used. 
- canUseWinKernelProxy, err := winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{}) - if !canUseWinKernelProxy && err != nil { - return nil, err - } - - var proxier proxy.Provider - proxyMode := proxyconfigapi.ProxyModeKernelspace - dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{}) - if dualStackMode { - klog.InfoS("Creating dualStackProxier for Windows kernel.") - - proxier, err = winkernel.NewDualStackProxier( - config.IPTables.SyncPeriod.Duration, - config.IPTables.MinSyncPeriod.Duration, - config.IPTables.MasqueradeAll, - int(*config.IPTables.MasqueradeBit), - config.ClusterCIDR, - hostname, - nodeIPTuple(config.BindAddress), - recorder, - healthzServer, - config.Winkernel, - healthzPort, - ) - } else { - proxier, err = winkernel.NewProxier( - config.IPTables.SyncPeriod.Duration, - config.IPTables.MinSyncPeriod.Duration, - config.IPTables.MasqueradeAll, - int(*config.IPTables.MasqueradeBit), - config.ClusterCIDR, - hostname, - nodeIP, - recorder, - healthzServer, - config.Winkernel, - healthzPort, - ) - } - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - winkernel.RegisterMetrics() - - return &ProxyServer{ - Client: client, - EventClient: eventClient, - Proxier: proxier, - Broadcaster: eventBroadcaster, - Recorder: recorder, - ProxyMode: proxyMode, - NodeRef: nodeRef, - MetricsBindAddress: config.MetricsBindAddress, - BindAddressHardFail: config.BindAddressHardFail, - EnableProfiling: config.EnableProfiling, - OOMScoreAdj: config.OOMScoreAdj, - ConfigSyncPeriod: config.ConfigSyncPeriod.Duration, - HealthzServer: healthzServer, - }, nil -} - -func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool { - return compatTester.DualStackCompatible(networkname) -} - -func detectNumCPU() int { - return goruntime.NumCPU() -} - -// cleanupAndExit cleans up after a previous proxy run -func cleanupAndExit() error { - return errors.New("--cleanup-and-exit is not implemented on Windows") -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index 4c4fb27b2448..742524e325f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -92,7 +92,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/eviction" @@ -108,6 +107,7 @@ import ( "k8s.io/kubernetes/pkg/util/rlimit" "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/subpath" + "k8s.io/utils/cpuset" "k8s.io/utils/exec" netutils "k8s.io/utils/net" ) @@ -256,7 +256,7 @@ is checked every 20 seconds (also configurable with a flag).`, config.StaticPodURLHeader[k] = []string{""} } // log the kubelet's config for inspection - klog.V(5).InfoS("KubeletConfiguration", "configuration", config) + klog.V(5).InfoS("KubeletConfiguration", "configuration", klog.Format(config)) // set up signal context for kubelet shutdown ctx := genericapiserver.SetupSignalContext() @@ -1061,6 +1061,12 @@ func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletCo return nil, err } + if minTLSVersion == tls.VersionTLS13 { + if 
len(tlsCipherSuites) != 0 { + klog.InfoS("Warning: TLS 1.3 cipher suites are not configurable, ignoring --tls-cipher-suites") + } + } + tlsOptions := &server.TLSOptions{ Config: &tls.Config{ MinVersion: minTLSVersion, @@ -1181,9 +1187,7 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele if kubeCfg.ReadOnlyPort > 0 { go k.ListenAndServeReadOnly(netutils.ParseIPSloppy(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort)) } - if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) { - go k.ListenAndServePodResources() - } + go k.ListenAndServePodResources() } func createAndInitKubelet(kubeServer *options.KubeletServer, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go index 14c287c39634..8c10a956f134 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go @@ -280,7 +280,7 @@ func ValidateControllerRevisionUpdate(newHistory, oldHistory *apps.ControllerRev // ValidateDaemonSet tests if required fields in the DaemonSet are set. func ValidateDaemonSet(ds *apps.DaemonSet, opts apivalidation.PodValidationOptions) field.ErrorList { allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"), opts)...) + allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, nil, field.NewPath("spec"), opts)...) return allErrs } @@ -288,7 +288,7 @@ func ValidateDaemonSet(ds *apps.DaemonSet, opts apivalidation.PodValidationOptio func ValidateDaemonSetUpdate(ds, oldDS *apps.DaemonSet, opts apivalidation.PodValidationOptions) field.ErrorList { allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateDaemonSetSpecUpdate(&ds.Spec, &oldDS.Spec, field.NewPath("spec"))...) - allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"), opts)...) + allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, &oldDS.Spec, field.NewPath("spec"), opts)...) return allErrs } @@ -344,7 +344,7 @@ func ValidateDaemonSetStatusUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList { } // ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. -func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList { +func ValidateDaemonSetSpec(spec, oldSpec *apps.DaemonSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector} @@ -359,8 +359,12 @@ func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path, opts a } allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"), opts)...) - // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. - allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...) 
+	// TODO: get rid of apivalidation.ValidateReadOnlyPersistentDisks and stop passing oldSpec to this function.
+	var oldVols []api.Volume
+	if oldSpec != nil {
+		oldVols = oldSpec.Template.Spec.Volumes // +k8s:verify-mutation:reason=clone
+	}
+	allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, oldVols, fldPath.Child("template", "spec", "volumes"))...)
 	// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
 	if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
 		allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
@@ -544,7 +548,7 @@ func ValidateRollback(rollback *apps.RollbackConfig, fldPath *field.Path) field.
 }
 
 // ValidateDeploymentSpec validates given deployment spec.
-func ValidateDeploymentSpec(spec *apps.DeploymentSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
+func ValidateDeploymentSpec(spec, oldSpec *apps.DeploymentSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 
 	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
@@ -562,7 +566,12 @@
 	if err != nil {
 		allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
 	} else {
-		allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"), opts)...)
+		// If oldSpec is not nil (the update path), pass oldSpec.Template.
+		var oldTemplate *api.PodTemplateSpec
+		if oldSpec != nil {
+			oldTemplate = &oldSpec.Template // +k8s:verify-mutation:reason=clone
+		}
+		allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, oldTemplate, selector, spec.Replicas, fldPath.Child("template"), opts)...)
 	}
 
 	allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
@@ -614,7 +623,7 @@ func ValidateDeploymentStatus(status *apps.DeploymentStatus, fldPath *field.Path
 // ValidateDeploymentUpdate tests if an update to a Deployment is valid.
 func ValidateDeploymentUpdate(update, old *apps.Deployment, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, &old.Spec, field.NewPath("spec"), opts)...)
 	return allErrs
 }
 
@@ -637,7 +646,7 @@ func ValidateDeploymentStatusUpdate(update, old *apps.Deployment) field.ErrorLis
 // ValidateDeployment validates a given Deployment.
 func ValidateDeployment(obj *apps.Deployment, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, nil, field.NewPath("spec"), opts)...)
 	return allErrs
 }
 
@@ -660,7 +669,7 @@ var ValidateReplicaSetName = apimachineryvalidation.NameIsDNSSubdomain
 
 // ValidateReplicaSet tests if required fields in the ReplicaSet are set.
 func ValidateReplicaSet(rs *apps.ReplicaSet, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, nil, field.NewPath("spec"), opts)...)
 	return allErrs
 }
 
@@ -668,7 +677,7 @@ func ValidateReplicaSet(rs *apps.ReplicaSet, opts apivalidation.PodValidationOpt
 func ValidateReplicaSetUpdate(rs, oldRs *apps.ReplicaSet, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
-	allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, &oldRs.Spec, field.NewPath("spec"), opts)...)
 	return allErrs
 }
 
@@ -705,7 +714,7 @@ func ValidateReplicaSetStatus(status apps.ReplicaSetStatus, fldPath *field.Path)
 }
 
 // ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set.
-func ValidateReplicaSetSpec(spec *apps.ReplicaSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
+func ValidateReplicaSetSpec(spec, oldSpec *apps.ReplicaSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 
 	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
@@ -725,13 +734,18 @@
 	if err != nil {
 		allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
 	} else {
-		allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"), opts)...)
+		// If oldSpec is not nil (the update path), pass oldSpec.Template.
+		var oldTemplate *api.PodTemplateSpec
+		if oldSpec != nil {
+			oldTemplate = &oldSpec.Template // +k8s:verify-mutation:reason=clone
+		}
+		allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, oldTemplate, selector, spec.Replicas, fldPath.Child("template"), opts)...)
 	}
 	return allErrs
 }
 
 // ValidatePodTemplateSpecForReplicaSet validates the given template and ensures that it is in accordance with the desired selector and replicas.
-func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
+func ValidatePodTemplateSpecForReplicaSet(template, oldTemplate *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 	if template == nil {
 		allErrs = append(allErrs, field.Required(fldPath, ""))
@@ -744,8 +758,14 @@
 		}
 	}
 	allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath, opts)...)
+	// Pods from a ReplicaSet can run on more than one node, so writable GCE PDs are rejected below when replicas > 1.
+	// TODO: get rid of apivalidation.ValidateReadOnlyPersistentDisks and stop passing oldTemplate to this function.
+	var oldVols []api.Volume
+	if oldTemplate != nil {
+		oldVols = oldTemplate.Spec.Volumes // +k8s:verify-mutation:reason=clone
+	}
 	if replicas > 1 {
-		allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
+		allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, oldVols, fldPath.Child("spec", "volumes"))...)
 	}
 	// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
 	if template.Spec.RestartPolicy != api.RestartPolicyAlways {
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS
index ba7b77a5a78b..ab572136eace 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS
@@ -2,7 +2,6 @@
 reviewers:
   - thockin
-  - lavalamp
   - smarterclayton
   - wojtek-t
   - deads2k
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
index 4cdbae980026..77464b25766e 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
@@ -453,41 +453,6 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {
 	return false
 }
 
-func toResourceNames(resources core.ResourceList) []core.ResourceName {
-	result := []core.ResourceName{}
-	for resourceName := range resources {
-		result = append(result, resourceName)
-	}
-	return result
-}
-
-func toSet(resourceNames []core.ResourceName) sets.String {
-	result := sets.NewString()
-	for _, resourceName := range resourceNames {
-		result.Insert(string(resourceName))
-	}
-	return result
-}
-
-// toContainerResourcesSet returns a set of resources names in container resource requirements
-func toContainerResourcesSet(ctr *core.Container) sets.String {
-	resourceNames := toResourceNames(ctr.Resources.Requests)
-	resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...)
-	return toSet(resourceNames)
-}
-
-// ToPodResourcesSet returns a set of resource names in all containers in a pod.
-func ToPodResourcesSet(podSpec *core.PodSpec) sets.String {
-	result := sets.NewString()
-	for i := range podSpec.InitContainers {
-		result = result.Union(toContainerResourcesSet(&podSpec.InitContainers[i]))
-	}
-	for i := range podSpec.Containers {
-		result = result.Union(toContainerResourcesSet(&podSpec.Containers[i]))
-	}
-	return result
-}
-
 // GetDeletionCostFromPodAnnotations returns the integer value of pod-deletion-cost. Returns 0
 // if not set or the value is invalid.
func GetDeletionCostFromPodAnnotations(annotations map[string]string) (int32, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS index 215733b59788..1a5f757675f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS @@ -1,7 +1,6 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - lavalamp - smarterclayton - deads2k - caesarxuchao diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index d8f657b74230..04ffd250b61e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -2037,7 +2037,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. Name string // The header field value Value string @@ -4082,10 +4083,9 @@ type ServiceSpec struct { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional LoadBalancerIP string @@ -5802,12 +5802,9 @@ type WindowsSecurityContextOptions struct { RunAsUserName *string // HostProcess determines if a container should be run as a 'Host Process' container. - // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. 
// +optional HostProcess *bool } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS index dfcc2e714b4c..a47166dcef20 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index dd92428cdf3b..6793616f3a89 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -42,6 +42,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { "spec.restartPolicy", "spec.schedulerName", "spec.serviceAccountName", + "spec.hostNetwork", "status.phase", "status.podIP", "status.podIPs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 433ae39b51f5..51337fe169cb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -118,8 +118,8 @@ func SetDefaults_Service(obj *v1.Service) { if sp.Protocol == "" { sp.Protocol = v1.ProtocolTCP } - if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { - sp.TargetPort = intstr.FromInt(int(sp.Port)) + if sp.TargetPort == intstr.FromInt32(0) || sp.TargetPort == intstr.FromString("") { + sp.TargetPort = intstr.FromInt32(sp.Port) } } // Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service @@ -199,6 +199,11 @@ func SetDefaults_Pod(obj *v1.Pod) { enableServiceLinks := v1.DefaultEnableServiceLinks obj.Spec.EnableServiceLinks = &enableServiceLinks } + + if obj.Spec.HostNetwork { + defaultHostNetworkPorts(&obj.Spec.Containers) + defaultHostNetworkPorts(&obj.Spec.InitContainers) + } } func SetDefaults_PodSpec(obj *v1.PodSpec) { // New fields added here will break upgrade tests: @@ -211,9 +216,11 @@ func SetDefaults_PodSpec(obj *v1.PodSpec) { if obj.RestartPolicy == "" { obj.RestartPolicy = v1.RestartPolicyAlways } - if obj.HostNetwork { - defaultHostNetworkPorts(&obj.Containers) - defaultHostNetworkPorts(&obj.InitContainers) + if utilfeature.DefaultFeatureGate.Enabled(features.DefaultHostNetworkHostPortsInPodTemplates) { + if obj.HostNetwork { + defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) + } } if obj.SecurityContext == nil { obj.SecurityContext = &v1.PodSecurityContext{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go index 34aca4f2c528..932e3ac6921e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -315,12 +315,6 @@ func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) return true } -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. 
-func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool { - return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) -} - // GetMatchingTolerations returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { if len(taints) == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS index 054fa14fa74c..30b589bd6d8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index 465c92380a94..6502d9c210a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -2970,7 +2970,7 @@ func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) fie return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) } func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList { - return ValidatePortNumOrName(intstr.FromInt(int(grpc.Port)), fldPath.Child("port")) + return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port")) } func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList { numHandlers := 0 @@ -3399,14 +3399,10 @@ const ( // restrictions in Linux libc name resolution handling. // Max number of DNS name servers. MaxDNSNameservers = 3 - // Expanded max number of domains in the search path list. - MaxDNSSearchPathsExpanded = 32 - // Expanded max number of characters in the search path. - MaxDNSSearchListCharsExpanded = 2048 // Max number of domains in the search path list. - MaxDNSSearchPathsLegacy = 6 - // Max number of characters in the search path list. - MaxDNSSearchListCharsLegacy = 256 + MaxDNSSearchPaths = 32 + // Max number of characters in the search path. + MaxDNSSearchListChars = 2048 ) func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *field.Path) field.ErrorList { @@ -3455,16 +3451,12 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic } } // Validate searches. - maxDNSSearchPaths, maxDNSSearchListChars := MaxDNSSearchPathsLegacy, MaxDNSSearchListCharsLegacy - if opts.AllowExpandedDNSConfig { - maxDNSSearchPaths, maxDNSSearchListChars = MaxDNSSearchPathsExpanded, MaxDNSSearchListCharsExpanded - } - if len(dnsConfig.Searches) > maxDNSSearchPaths { - allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", maxDNSSearchPaths))) + if len(dnsConfig.Searches) > MaxDNSSearchPaths { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths))) } // Include the space between search paths. 
-	if len(strings.Join(dnsConfig.Searches, " ")) > maxDNSSearchListChars {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", maxDNSSearchListChars)))
+	if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", MaxDNSSearchListChars)))
 	}
 	for i, search := range dnsConfig.Searches {
 		// it is fine to have a trailing dot
@@ -3481,15 +3473,35 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic
 	return allErrs
 }
 
-func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList {
+// validatePodHostNetworkDeps checks fields which depend on whether HostNetwork is
+// true or not. It should be called on all PodSpecs, but opts can change what
+// is enforced. E.g. opts.ResourceIsPod should only be set when called in the
+// context of a Pod, and not on PodSpecs which are embedded in other resources
+// (e.g. Deployments).
+func validatePodHostNetworkDeps(spec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
+	// For historical reasons we keep `.HostNetwork` in .SecurityContext on the
+	// internal version of Pod.
+	hostNetwork := false
+	if spec.SecurityContext != nil {
+		hostNetwork = spec.SecurityContext.HostNetwork
+	}
+	allErrors := field.ErrorList{}
+	if hostNetwork {
-		for i, container := range containers {
+		fldPath := fldPath.Child("containers")
+		for i, container := range spec.Containers {
 			portsPath := fldPath.Index(i).Child("ports")
 			for i, port := range container.Ports {
 				idxPath := portsPath.Index(i)
-				if port.HostPort != port.ContainerPort {
-					allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true"))
+				// At this point, we know that HostNetwork is true. If this
+				// PodSpec is in a Pod (opts.ResourceIsPod), then HostPort must
+				// be the same value as ContainerPort. If this PodSpec is in
+				// some other resource (e.g. Deployment) we allow 0 (i.e.
+				// unspecified) because it will be defaulted when the Pod is
+				// ultimately created, but we do not allow any other values.
+				if hp, cp := port.HostPort, port.ContainerPort; (opts.ResourceIsPod || hp != 0) && hp != cp {
+					allErrors = append(allErrors, field.Invalid(idxPath.Child("hostPort"), port.HostPort, "must match `containerPort` when `hostNetwork` is true"))
 				}
 			}
 		}
@@ -3688,25 +3700,27 @@ type PodValidationOptions struct {
 	AllowInvalidLabelValueInSelector bool
 	// Allow pod spec to use non-integer multiple of huge page unit size
 	AllowIndivisibleHugePagesValues bool
-	// Allow more DNSSearchPaths and longer DNSSearchListChars
-	AllowExpandedDNSConfig bool
 	// Allow invalid topologySpreadConstraint labelSelector for backward compatibility
 	AllowInvalidTopologySpreadConstraintLabelSelector bool
 	// Allow node selector additions for gated pods.
 	AllowMutableNodeSelectorAndNodeAffinity bool
+	// The top-level resource being validated is a Pod, not just a PodSpec
+	// embedded in some other resource.
+	ResourceIsPod bool
 }
 
 // validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
 // and is called by ValidatePodCreate and ValidatePodUpdate.
func validatePodMetadataAndSpec(pod *core.Pod, opts PodValidationOptions) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"), opts)...) - allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, field.NewPath("spec"), opts)...) + metaPath := field.NewPath("metadata") + specPath := field.NewPath("spec") + + allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, metaPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, metaPath.Child("annotations"), opts)...) + allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, specPath, opts)...) // we do additional validation only pertinent for pods and not pod templates // this was done to preserve backwards compatibility - specPath := field.NewPath("spec") if pod.Spec.ServiceAccountName == "" { for vi, volume := range pod.Spec.Volumes { @@ -3790,10 +3804,11 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...) allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...) allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...) + allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) - allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...) + allErrs = append(allErrs, validatePodSpecSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...) allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) allErrs = append(allErrs, validateAffinity(spec.Affinity, opts, fldPath.Child("affinity"))...) allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...) @@ -4396,12 +4411,13 @@ func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList return allErrs } -// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. -func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +// validatePodSpecSecurityContext verifies the SecurityContext of a PodSpec, +// whether that is defined in a Pod or in an embedded PodSpec (e.g. a +// Deployment's pod template). 
+func validatePodSpecSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 
 	if securityContext != nil {
-		allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...)
 		if securityContext.FSGroup != nil {
 			for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) {
 				allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
@@ -4727,7 +4743,14 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
 		// already effectively nil, no change needed
 		case mungedPodSpec.Affinity == nil && oldNodeAffinity != nil:
 			mungedPodSpec.Affinity = &core.Affinity{NodeAffinity: oldNodeAffinity} // +k8s:verify-mutation:reason=clone
+		case mungedPodSpec.Affinity != nil && oldPod.Spec.Affinity == nil &&
+			mungedPodSpec.Affinity.PodAntiAffinity == nil && mungedPodSpec.Affinity.PodAffinity == nil:
+			// We ensure that no fields other than NodeAffinity are being changed. If that's the case, and the
+			// old pod's affinity is nil, we set the mungedPodSpec's affinity to nil as well.
+			mungedPodSpec.Affinity = nil // +k8s:verify-mutation:reason=clone
 		default:
+			// The node affinity is being updated and the old pod's Affinity is not nil.
+			// We set the mungedPodSpec's node affinity to the old pod's node affinity.
 			mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone
 		}
 	}
@@ -5238,14 +5261,14 @@ func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorL
 
 // ValidateReplicationController tests if required fields in the replication controller are set.
 func ValidateReplicationController(controller *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
 	allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, nil, field.NewPath("spec"), opts)...)
 	return allErrs
 }
 
 // ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
 func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
 	allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, &oldController.Spec, field.NewPath("spec"), opts)...)
 	return allErrs
 }
 
@@ -5290,7 +5313,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path
 }
 
 // Validates the given template and ensures that it is in accordance with the desired selector and replicas.
-func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
+func ValidatePodTemplateSpecForRC(template, oldTemplate *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 	if template == nil {
 		allErrs = append(allErrs, field.Required(fldPath, ""))
@@ -5304,8 +5327,13 @@
 		}
 	}
 	allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath, opts)...)
+	// TODO: get rid of apivalidation.ValidateReadOnlyPersistentDisks and stop passing oldTemplate to this function.
+	var oldVols []core.Volume
+	if oldTemplate != nil {
+		oldVols = oldTemplate.Spec.Volumes // +k8s:verify-mutation:reason=clone
+	}
 	if replicas > 1 {
-		allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
+		allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, oldVols, fldPath.Child("spec", "volumes"))...)
 	}
 	// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
 	if template.Spec.RestartPolicy != core.RestartPolicyAlways {
@@ -5319,12 +5347,17 @@
 }
 
 // ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
-func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
+func ValidateReplicationControllerSpec(spec, oldSpec *core.ReplicationControllerSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}
 	allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
 	allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
 	allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
-	allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
+	// If oldSpec is not nil (the update path), pass oldSpec.Template.
+	var oldTemplate *core.PodTemplateSpec
+	if oldSpec != nil {
+		oldTemplate = oldSpec.Template // +k8s:verify-mutation:reason=clone
+	}
+	allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, oldTemplate, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
 	return allErrs
 }
 
@@ -5344,17 +5377,29 @@ func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path, op
 	return allErrs
 }
 
-func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList {
+// ValidateReadOnlyPersistentDisks ensures that GCE PersistentDisks used by
+// replicated pods are mounted read-only. The check short-circuits when the
+// SkipReadOnlyValidationGCE feature gate is enabled or when the old volumes
+// already contained a writable PD.
+func ValidateReadOnlyPersistentDisks(volumes, oldVolumes []core.Volume, fldPath *field.Path) field.ErrorList {
 	allErrs := field.ErrorList{}
+
+	if utilfeature.DefaultFeatureGate.Enabled(features.SkipReadOnlyValidationGCE) {
+		return field.ErrorList{}
+	}
+
+	isWriteablePD := func(vol *core.Volume) bool {
+		return vol.GCEPersistentDisk != nil && !vol.GCEPersistentDisk.ReadOnly
+	}
+
+	for i := range oldVolumes {
+		if isWriteablePD(&oldVolumes[i]) {
+			return field.ErrorList{}
+		}
+	}
+
 	for i := range volumes {
-		vol := &volumes[i]
 		idxPath := fldPath.Index(i)
-		if vol.GCEPersistentDisk != nil {
-			if !vol.GCEPersistentDisk.ReadOnly {
-				allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
-			}
+		if isWriteablePD(&volumes[i]) {
+			allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
 		}
-		// TODO: What to do for AWS? It doesn't support replicas
 	}
 	return allErrs
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS
index e559bf2aeeec..7911d9af4a1f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS
@@ -2,7 +2,6 @@
 reviewers:
   - thockin
-  - lavalamp
   - smarterclayton
   - wojtek-t
   - deads2k
@@ -19,5 +18,6 @@ reviewers:
   - mwielgus
   - soltysh
   - dims
+  - jpbetz
 labels:
   - sig/apps
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
index 6c9eaa7a4844..9ec17540baef 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
@@ -35,11 +35,6 @@ type NetworkPolicy struct {
 	// spec represents the specification of the desired behavior for this NetworkPolicy.
 	// +optional
 	Spec NetworkPolicySpec
-
-	// status represents the current state of the NetworkPolicy.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-	// +optional
-	Status NetworkPolicyStatus
 }
 
 // PolicyType describes the NetworkPolicy type
@@ -201,42 +196,6 @@ type NetworkPolicyPeer struct {
 	IPBlock *IPBlock
 }
 
-// NetworkPolicyConditionType is the type for status conditions on
-// a NetworkPolicy. This type should be used with the
-// NetworkPolicyStatus.Conditions field.
-type NetworkPolicyConditionType string - -const ( - // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by - // the Network Policy provider and will be implemented in the cluster - NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted" - - // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially - // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some - // other condition - NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure" - - // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the - // Network Policy provider and will not be implemented in the cluster - NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure" -) - -// NetworkPolicyConditionReason defines the set of reasons that explain why a -// particular NetworkPolicy condition type has been raised. -type NetworkPolicyConditionReason string - -const ( - // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been - // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider - NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" -) - -// NetworkPolicyStatus describes the current state of the NetworkPolicy. -type NetworkPolicyStatus struct { - // conditions holds an array of metav1.Condition that describes the state of the NetworkPolicy. - Conditions []metav1.Condition -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NetworkPolicyList is a list of NetworkPolicy objects. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go index e6a47cc1b0c9..3a39c6cac408 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go @@ -661,7 +661,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) return } @@ -874,29 +873,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus. -func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { - if in == nil { - return nil - } - out := new(NetworkPolicyStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ParentReference) DeepCopyInto(out *ParentReference) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index 347c284ccb99..31ae7a98bc9c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -849,11 +849,11 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) + surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt32(0)), int(desired), true) if err != nil { return 0, 0, err } - unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) + unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt32(0)), int(desired), false) if err != nil { return 0, 0, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go deleted file mode 100644 index 160aa6e08624..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "hash/fnv" - "sync" - - "github.com/golang/groupcache/lru" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - hashutil "k8s.io/kubernetes/pkg/util/hash" -) - -type objectWithMeta interface { - metav1.Object -} - -// keyFunc returns the key of an object, which is used to look up in the cache for it's matching object. -// Since we match objects by namespace and Labels/Selector, so if two objects have the same namespace and labels, -// they will have the same key. -func keyFunc(obj objectWithMeta) uint64 { - hash := fnv.New32a() - hashutil.DeepHashObject(hash, &equivalenceLabelObj{ - namespace: obj.GetNamespace(), - labels: obj.GetLabels(), - }) - return uint64(hash.Sum32()) -} - -type equivalenceLabelObj struct { - namespace string - labels map[string]string -} - -// MatchingCache save label and selector matching relationship -type MatchingCache struct { - mutex sync.RWMutex - cache *lru.Cache -} - -// NewMatchingCache return a NewMatchingCache, which save label and selector matching relationship. 
-func NewMatchingCache(maxCacheEntries int) *MatchingCache { - return &MatchingCache{ - cache: lru.New(maxCacheEntries), - } -} - -// Add will add matching information to the cache. -func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) { - key := keyFunc(labelObj) - c.mutex.Lock() - defer c.mutex.Unlock() - c.cache.Add(key, selectorObj) -} - -// GetMatchingObject lookup the matching object for a given object. -// Note: the cache information may be invalid since the controller may be deleted or updated, -// we need check in the external request to ensure the cache data is not dirty. -func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) { - key := keyFunc(labelObj) - // NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state( - // it need update the least recently usage information). So we can not call it concurrently. - c.mutex.Lock() - defer c.mutex.Unlock() - return c.cache.Get(key) -} - -// Update update the cached matching information. -func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) { - c.Add(labelObj, selectorObj) -} - -// InvalidateAll invalidate the whole cache. -func (c *MatchingCache) InvalidateAll() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.cache = lru.New(c.cache.MaxEntries) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go index bcde4f385f2f..3051c04a12c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_acr_helper.go @@ -53,7 +53,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -185,7 +184,7 @@ func performTokenExchange( var content []byte limitedReader := &io.LimitedReader{R: exchange.Body, N: maxReadLength} - if content, err = ioutil.ReadAll(limitedReader); err != nil { + if content, err = io.ReadAll(limitedReader); err != nil { return "", fmt.Errorf("Www-Authenticate: error reading response from %s", authEndpoint) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go index 8302ebb47e8c..d3b3f1932677 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go @@ -23,7 +23,6 @@ import ( "context" "errors" "io" - "io/ioutil" "os" "regexp" "strings" @@ -115,7 +114,7 @@ func parseConfig(configReader io.Reader) (*auth.AzureAuthConfig, error) { } limitedReader := &io.LimitedReader{R: configReader, N: maxReadLength} - configContents, err := ioutil.ReadAll(limitedReader) + configContents, err := io.ReadAll(limitedReader) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go index 86ce18542c85..203ad14dfa61 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "os" "path/filepath" 
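The credentialprovider hunks above and below follow one mechanical pattern: the deprecated `io/ioutil` helpers are swapped for their `io` and `os` equivalents (`ioutil.ReadAll` becomes `io.ReadAll`, `ioutil.ReadFile` becomes `os.ReadFile`), while reads from untrusted sources stay capped by an `io.LimitedReader`. A minimal sketch of that bounded-read idiom, assuming an illustrative `maxReadLength` constant (the vendored code defines its own limit):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// maxReadLength is illustrative; the vendored code defines its own bound.
const maxReadLength = 10 * 1 << 20 // 10 MB

// readBounded is the post-migration shape: io.ReadAll instead of the
// deprecated ioutil.ReadAll, wrapped in a LimitedReader so an untrusted
// source cannot make the caller buffer unbounded data.
func readBounded(r io.Reader) ([]byte, error) {
	limited := &io.LimitedReader{R: r, N: maxReadLength}
	contents, err := io.ReadAll(limited)
	if err != nil {
		return nil, err
	}
	// N hits 0 once the full budget is consumed, meaning the payload was at
	// least maxReadLength bytes and may have been truncated.
	if limited.N <= 0 {
		return nil, errors.New("the read limit is reached")
	}
	return contents, nil
}

func main() {
	data, err := readBounded(strings.NewReader(`{"auths":{}}`))
	fmt.Println(string(data), err)
}
```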
@@ -108,7 +107,7 @@ func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) { continue } klog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) - contents, err := ioutil.ReadFile(absDockerConfigFileLocation) + contents, err := os.ReadFile(absDockerConfigFileLocation) if os.IsNotExist(err) { continue } @@ -160,7 +159,7 @@ func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error func ReadSpecificDockerConfigJSONFile(filePath string) (cfg DockerConfig, err error) { var contents []byte - if contents, err = ioutil.ReadFile(filePath); err != nil { + if contents, err = os.ReadFile(filePath); err != nil { return nil, err } return readDockerConfigJSONFileFromBytes(contents) @@ -211,7 +210,7 @@ func ReadURL(url string, client *http.Client, header *http.Header) (body []byte, } limitedReader := &io.LimitedReader{R: resp.Body, N: maxReadLength} - contents, err := ioutil.ReadAll(limitedReader) + contents, err := io.ReadAll(limitedReader) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go index e71cb2f24d6e..f11b09a69fd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go @@ -18,8 +18,8 @@ package gcp import ( "encoding/json" - "io/ioutil" "net/http" + "os" "os/exec" "runtime" "strings" @@ -134,7 +134,7 @@ func onGCEVM() bool { } name = fields[1] } else { - data, err := ioutil.ReadFile(gceProductNameFile) + data, err := os.ReadFile(gceProductNameFile) if err != nil { klog.V(2).Infof("Error while reading product_name: %v", err) return false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/config.go index c65f3bb472f7..841d5b22123f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/config.go @@ -18,9 +18,10 @@ package plugin import ( "fmt" - "io/ioutil" "strings" + "os" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/pkg/credentialprovider" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" @@ -33,7 +34,7 @@ func readCredentialProviderConfigFile(configPath string) (*kubeletconfig.Credent return nil, fmt.Errorf("credential provider config path is empty") } - data, err := ioutil.ReadFile(configPath) + data, err := os.ReadFile(configPath) if err != nil { return nil, fmt.Errorf("unable to read external registry credential provider configuration from %q: %w", configPath, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/plugin.go index 3eccb0006205..95bb18222d06 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugin/plugin.go @@ -410,7 +410,7 @@ func (e *execPlugin) ExecPlugin(ctx context.Context, image string) (*credentialp cmd.Env = mergeEnvVars(e.environ(), configEnvVars) if err = e.runPlugin(ctx, cmd, image); err != nil { - return nil, err + return nil, fmt.Errorf("%w: %s", err, stderr.String()) } data = stdout.Bytes() diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
index dae179775042..c10ef871ac04 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
@@ -53,6 +53,7 @@ const (
 	// owner: @nabokihms
 	// alpha: v1.26
 	// beta: v1.27
+	// GA: v1.28
 	//
 	// Enables API to get self subject attributes after authentication.
 	APISelfSubjectReview featuregate.Feature = "APISelfSubjectReview"
@@ -137,6 +138,12 @@ const (
 	// Enables the GCE PD in-tree driver to GCE CSI Driver migration feature.
 	CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE"

+	// owner: @mfordjody
+	// alpha: v1.26
+	//
+	// Skips the read-only validation for GCE PersistentDisks (see ValidateReadOnlyPersistentDisks); expected to be enabled by default in a future release.
+	SkipReadOnlyValidationGCE featuregate.Feature = "SkipReadOnlyValidationGCE"
+
 	// owner: @trierra
 	// alpha: v1.23
 	//
@@ -163,14 +170,6 @@ const (
 	// Enables SecretRef field in CSI NodeExpandVolume request.
 	CSINodeExpandSecret featuregate.Feature = "CSINodeExpandSecret"

-	// owner: @pohly
-	// alpha: v1.19
-	// beta: v1.21
-	// GA: v1.24
-	//
-	// Enables tracking of available storage capacity that CSI drivers provide.
-	CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity"
-
 	// owner: @fengzixu
 	// alpha: v1.21
 	//
@@ -205,30 +204,14 @@ const (
 	// Enables support for time zones in CronJobs.
 	CronJobTimeZone featuregate.Feature = "CronJobTimeZone"

-	// owner: @gnufied, @verult, @bertinatto
-	// alpha: v1.22
-	// beta: v1.23
-	// GA: v1.26
-	// If supported by the CSI driver, delegates the role of applying FSGroup to
-	// the driver by passing FSGroup through the NodeStageVolume and
-	// NodePublishVolume calls.
-	DelegateFSGroupToCSIDriver featuregate.Feature = "DelegateFSGroupToCSIDriver"
-
-	// owner: @jiayingz, @swatisehgal (for GA graduation)
-	// alpha: v1.8
-	// beta: v1.10
-	// GA: v1.26
+	// owner: @thockin
+	// deprecated: v1.28
 	//
-	// Enables support for Device Plugins
-	DevicePlugins featuregate.Feature = "DevicePlugins"
-
-	// owner: @RenaudWasTaken @dashpole
-	// alpha: v1.19
-	// beta: v1.20
-	// ga: v1.25
-	//
-	// Disables Accelerator Metrics Collected by Kubelet
-	DisableAcceleratorUsageMetrics featuregate.Feature = "DisableAcceleratorUsageMetrics"
+	// Changes when the default value of PodSpec.containers[].ports[].hostPort
+	// is assigned. The default is to only set a default value in Pods.
+	// Enabling this means a default will be assigned even to embeddedPodSpecs
+	// (e.g. in a Deployment), which is the historical default.
+	DefaultHostNetworkHostPortsInPodTemplates featuregate.Feature = "DefaultHostNetworkHostPortsInPodTemplates"

 	// owner: @andrewsykim
 	// alpha: v1.22
@@ -258,15 +241,6 @@ const (
 	// that is independent of a Pod.
 	DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"

-	// owner: @andrewsykim
-	// kep: https://kep.k8s.io/1672
-	// alpha: v1.20
-	// beta: v1.22
-	// GA: v1.26
-	//
-	// Enable Terminating condition in Endpoint Slices.
-	EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"
-
 	// owner: @harche
 	// kep: http://kep.k8s.io/3386
 	// alpha: v1.25
@@ -288,16 +262,16 @@
 	// kep: https://kep.k8s.io/2595
 	// alpha: v1.22
 	// beta: v1.26
+	// GA: v1.28
 	//
 	// Enables apiserver and kubelet to allow up to 32 DNSSearchPaths and up to 2048 DNSSearchListChars.
ExpandedDNSConfig featuregate.Feature = "ExpandedDNSConfig" // owner: @pweil- // alpha: v1.5 + // deprecated: v1.28 // - // Default userns=host for containers that are using other host namespaces, host mounts, the pod - // contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE, - // SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon. + // This flag used to be needed for dockershim CRI and currently does nothing. ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting" // owner: @yuzhiquan, @bowei, @PxyUp, @SergeyKanzhelev @@ -436,14 +410,6 @@ const ( // yet. JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers" - // owner: @andrewsykim @adisky @ndixita - // alpha: v1.20 - // beta: v1.24 - // GA: v1.26 - // - // Enable kubelet exec plugins for image pull credentials. - KubeletCredentialProviders featuregate.Feature = "KubeletCredentialProviders" - // owner: @AkihiroSuda // alpha: v1.22 // @@ -452,9 +418,10 @@ const ( // All the node components such as CRI need to be running in the same user namespace. KubeletInUserNamespace featuregate.Feature = "KubeletInUserNamespace" - // owner: @dashpole + // owner: @dashpole, @ffromani (only for GA graduation) // alpha: v1.13 // beta: v1.15 + // GA: v1.28 // // Enables the kubelet's pod resources grpc endpoint KubeletPodResources featuregate.Feature = "KubeletPodResources" @@ -501,6 +468,13 @@ const ( // Enables tracking of secret-based service account tokens usage. LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking" + // owner: @yt2985 + // kep: http://kep.k8s.io/2800 + // alpha: v1.28 + // + // Enables cleaning up of secret-based service account tokens. + LegacyServiceAccountTokenCleanUp featuregate.Feature = "LegacyServiceAccountTokenCleanUp" + // owner: @RobertKrawitz // alpha: v1.15 // @@ -558,15 +532,6 @@ const ( // Enables new performance-improving code in kube-proxy iptables mode MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore" - // owner: @janosi @bridgetkromhout - // kep: https://kep.k8s.io/1435 - // alpha: v1.20 - // beta: v1.24 - // ga: v1.26 - // - // Enables the usage of different protocols in the same Service with type=LoadBalancer - MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService" - // owner: @sarveshr7 // kep: https://kep.k8s.io/2593 // alpha: v1.25 @@ -581,13 +546,6 @@ const ( // Enables the dynamic configuration of Service IP ranges MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator" - // owner: @rikatz - // kep: https://kep.k8s.io/2943 - // alpha: v1.24 - // - // Enables NetworkPolicy status subresource - NetworkPolicyStatus featuregate.Feature = "NetworkPolicyStatus" - // owner: @jsafrane // kep: https://kep.k8s.io/3756 // alpha: v1.25 (as part of SELinuxMountReadWriteOncePod) @@ -663,14 +621,6 @@ const ( // Enable users to specify when a Pod is ready for scheduling. 
 	PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness"

-	// owner: @liggitt, @tallclair, sig-auth
-	// alpha: v1.22
-	// beta: v1.23
-	// ga: v1.25
-	//
-	// Enables the PodSecurity admission plugin
-	PodSecurity featuregate.Feature = "PodSecurity"
-
 	// owner: @ehashman
 	// alpha: v1.21
 	// beta: v1.22
@@ -688,6 +638,7 @@
 	// kep: https://kep.k8s.io/1669
 	// alpha: v1.22
 	// beta: v1.26
+	// GA: v1.28
 	//
 	// Enable kube-proxy to handle terminating endpoints when externalTrafficPolicy=Local
 	ProxyTerminatingEndpoints featuregate.Feature = "ProxyTerminatingEndpoints"
@@ -756,27 +707,10 @@
 	// https://github.com/kubernetes/kubernetes/issues/111516
 	SecurityContextDeny featuregate.Feature = "SecurityContextDeny"

-	// owner: @maplain @andrewsykim
-	// kep: https://kep.k8s.io/2086
-	// alpha: v1.21
-	// beta: v1.22
-	// GA: v1.26
-	//
-	// Enables node-local routing for Service internal traffic
-	ServiceInternalTrafficPolicy featuregate.Feature = "ServiceInternalTrafficPolicy"
-
-	// owner: @aojea
-	// kep: https://kep.k8s.io/3070
-	// alpha: v1.24
-	// beta: v1.25
-	// ga: v1.26
-	//
-	// Subdivide the ClusterIP range for dynamic and static IP allocation.
-	ServiceIPStaticSubrange featuregate.Feature = "ServiceIPStaticSubrange"
-
 	// owner: @xuzhenglun
 	// kep: http://kep.k8s.io/3682
 	// alpha: v1.27
+	// beta: v1.28
 	//
 	// Subdivide the NodePort range for dynamic and static port allocation.
 	ServiceNodePortStaticSubrange featuregate.Feature = "ServiceNodePortStaticSubrange"
@@ -885,14 +819,6 @@
 	// Enables support for joining Windows containers to a host's network namespace.
 	WindowsHostNetwork featuregate.Feature = "WindowsHostNetwork"

-	// owner: @marosset
-	// alpha: v1.22
-	// beta: v1.23
-	// GA: v1.26
-	//
-	// Enables support for 'HostProcess' containers on Windows nodes.
- WindowsHostProcessContainers featuregate.Feature = "WindowsHostProcessContainers" - // owner: @kerthcet // kep: https://kep.k8s.io/3094 // alpha: v1.25 @@ -934,7 +860,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24 - APISelfSubjectReview: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.27 + APISelfSubjectReview: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.28; remove in 1.30 AppArmor: {Default: true, PreRelease: featuregate.Beta}, @@ -964,10 +890,10 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CSINodeExpandSecret: {Default: true, PreRelease: featuregate.Beta}, - CSIStorageCapacity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26 - CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha}, + SkipReadOnlyValidationGCE: {Default: false, PreRelease: featuregate.Alpha}, + CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha}, ContainerCheckpoint: {Default: false, PreRelease: featuregate.Alpha}, @@ -976,11 +902,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CronJobTimeZone: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - - DevicePlugins: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26 - - DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + DefaultHostNetworkHostPortsInPodTemplates: {Default: false, PreRelease: featuregate.Deprecated}, DisableCloudProviders: {Default: false, PreRelease: featuregate.Alpha}, @@ -988,17 +910,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS DownwardAPIHugePages: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.29 - EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.28 - DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, EventedPLEG: {Default: false, PreRelease: featuregate.Beta}, // off by default, requires CRI Runtime support ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update - ExpandedDNSConfig: {Default: true, PreRelease: featuregate.Beta}, + ExpandedDNSConfig: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta}, + ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.30 GRPCContainerProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.29 @@ -1036,11 +956,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - KubeletCredentialProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - KubeletInUserNamespace: {Default: false, PreRelease: featuregate.Alpha}, - KubeletPodResources: {Default: true, PreRelease: featuregate.Beta}, + KubeletPodResources: {Default: 
true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.28, remove in 1.30 KubeletPodResourcesDynamicResources: {Default: false, PreRelease: featuregate.Alpha}, @@ -1052,7 +970,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.Beta}, + LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 + + LegacyServiceAccountTokenCleanUp: {Default: false, PreRelease: featuregate.Alpha}, LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha}, @@ -1068,17 +988,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, - MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.Beta}, - - MixedProtocolLBService: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 + MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha}, MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha}, - NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha}, - - NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta}, + NewVolumeManagerReconstruction: {Default: false, PreRelease: featuregate.Beta}, // disabled for https://github.com/kubernetes/kubernetes/issues/117745 NodeLogQuery: {Default: false, PreRelease: featuregate.Alpha}, @@ -1098,13 +1014,11 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta}, - PodSecurity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - ProbeTerminationGracePeriod: {Default: true, PreRelease: featuregate.Beta}, // Default to true in beta 1.25 ProcMountType: {Default: false, PreRelease: featuregate.Alpha}, - ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.Beta}, + ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, @@ -1122,11 +1036,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS SecurityContextDeny: {Default: false, PreRelease: featuregate.Alpha}, - ServiceIPStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - - ServiceInternalTrafficPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - - ServiceNodePortStaticSubrange: {Default: false, PreRelease: featuregate.Alpha}, + ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.Beta}, SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta}, @@ -1156,11 +1066,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WindowsHostNetwork: {Default: true, PreRelease: featuregate.Alpha}, - WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - NodeInclusionPolicyInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, - SELinuxMountReadWriteOncePod: {Default: true, 
PreRelease: featuregate.Beta}, + SELinuxMountReadWriteOncePod: {Default: false, PreRelease: featuregate.Beta}, // disabled for https://github.com/kubernetes/kubernetes/issues/117745 InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, @@ -1183,8 +1091,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta}, - genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go index 91e3b9ba5e9b..8021a6e06678 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go @@ -123,6 +123,7 @@ type KubeletConfiguration struct { // tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile TLSPrivateKeyFile string // TLSCipherSuites is the list of allowed cipher suites for the server. + // Note that TLS 1.3 ciphersuites are not configurable. // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). TLSCipherSuites []string // TLSMinVersion is the minimum TLS version supported. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go index 81fdf241936c..7d9b452b4fdc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/validation/validation.go @@ -30,9 +30,9 @@ import ( tracingapi "k8s.io/component-base/tracing/api/v1" "k8s.io/kubernetes/pkg/features" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" utiltaints "k8s.io/kubernetes/pkg/util/taints" + "k8s.io/utils/cpuset" ) var ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go index ad6734ae729b..dcbf8ed4f31d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go @@ -79,9 +79,10 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *v1.ListPodResource podResources[i] = &pRes } - return &v1.ListPodResourcesResponse{ + response := &v1.ListPodResourcesResponse{ PodResources: podResources, - }, nil + } + return response, nil } // GetAllocatableResources returns information about all the resources known by the server - this more like the capacity, not like the current amount of free resources. 
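Most of the kube_features.go churn above follows the standard graduation lifecycle: a gate that reaches GA gains `LockToDefault: true` plus a scheduled removal release, after which it can no longer be overridden. A small sketch of what locking means in practice, against `k8s.io/component-base/featuregate` (the `ExampleFeature` name is made up):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gate := featuregate.NewFeatureGate()
	// Mirrors entries such as
	// {Default: true, PreRelease: featuregate.GA, LockToDefault: true}.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"ExampleFeature": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}
	// Disabling a locked gate is rejected instead of silently ignored.
	err := gate.Set("ExampleFeature=false")
	fmt.Printf("enabled=%v err=%v\n", gate.Enabled("ExampleFeature"), err)
}
```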
@@ -94,11 +95,13 @@ func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req return nil, fmt.Errorf("PodResources API GetAllocatableResources disabled") } - return &v1.AllocatableResourcesResponse{ + response := &v1.AllocatableResourcesResponse{ Devices: p.devicesProvider.GetAllocatableDevices(), CpuIds: p.cpusProvider.GetAllocatableCPUs(), Memory: p.memoryProvider.GetAllocatableMemory(), - }, nil + } + + return response, nil } // Get returns information about the resources assigned to a specific pod diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go index 80c1af0aa83c..2c03bb59ee92 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -31,7 +31,6 @@ import ( podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -40,6 +39,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" "k8s.io/kubernetes/pkg/kubelet/status" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/utils/cpuset" ) type ActivePodsFunc func() []*v1.Pod diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go index cdf5e5197c8b..eba774e8f624 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -24,7 +24,7 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) // LoopControl controls the behavior of the cpu accumulator loop logic diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go index 443eecd2d360..8b5049d7d747 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -32,11 +32,11 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/utils/cpuset" ) // ActivePodsFunc is a function that returns a list of pods to reconcile. 
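The string of cpumanager import changes above and below is a single migration: the kubelet-internal `pkg/kubelet/cm/cpuset` package was replaced by the shared `k8s.io/utils/cpuset` library. A quick sketch of the shared API at the call sites' new import path (values illustrative; treat exact method names as a sketch):

```go
package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// Parse accepts Linux cpulist syntax, e.g. "0-2,7".
	reserved, err := cpuset.Parse("0-1")
	if err != nil {
		panic(err)
	}
	// New replaces the NewCPUSet-style constructor of the old internal package.
	all := cpuset.New(0, 1, 2, 3)
	// Sets are immutable; operations return new sets.
	allocatable := all.Difference(reserved)
	fmt.Println(allocatable.String(), allocatable.Size()) // 2-3 2
}
```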
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index 933697051355..4a03f3dd23ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -21,10 +21,10 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/utils/cpuset" ) type fakeManager struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go index 31473686548c..a80da9427cef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy.go @@ -20,8 +20,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/utils/cpuset" ) // Policy implements logic for pod container to CPU assignment. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go index c5c151a78b3e..ff86498fd926 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go @@ -22,8 +22,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" + "k8s.io/utils/cpuset" ) type nonePolicy struct{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go index 7a82de03da84..0f72e64dbc8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go @@ -27,10 +27,10 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask" "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/utils/cpuset" ) const ( @@ -584,7 +584,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin } } -// generateCPUtopologyHints generates a set of TopologyHints given the set of +// generateCPUTopologyHints generates a set of TopologyHints given the set of // available CPUs and the number of CPUs being requested. 
// // It follows the convention of marking all hints that have the same number of diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go index ca6d2fc90a30..eb2bfa27eaf1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/checkpoint.go @@ -18,11 +18,11 @@ package state import ( "encoding/json" + "fmt" "hash/fnv" "strings" - "github.com/davecgh/go-spew/spew" - + "k8s.io/apimachinery/pkg/util/dump" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" @@ -102,21 +102,14 @@ func (cp *CPUManagerCheckpointV1) VerifyChecksum() error { return nil } - printer := spew.ConfigState{ - Indent: " ", - SortKeys: true, - DisableMethods: true, - SpewKeys: true, - } - ck := cp.Checksum cp.Checksum = 0 - object := printer.Sprintf("%#v", cp) + object := dump.ForHash(cp) object = strings.Replace(object, "CPUManagerCheckpointV1", "CPUManagerCheckpoint", 1) cp.Checksum = ck hash := fnv.New32a() - printer.Fprintf(hash, "%v", object) + fmt.Fprintf(hash, "%v", object) if cp.Checksum != checksum.Checksum(hash.Sum32()) { return errors.ErrCorruptCheckpoint } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go index a9bd906fcb23..96efbcfa4729 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go @@ -17,7 +17,7 @@ limitations under the License. 
package state import ( - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) // ContainerCPUAssignments type used in cpu manager state diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go index 9297f23757dd..e6b8fd4718dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) var _ State = &stateCheckpoint{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go index 8f3a10d95b2d..cb01ea92609b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go @@ -20,7 +20,7 @@ import ( "sync" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) type stateMemory struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go index 4c4147556ea0..62d91a5dee5d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -21,7 +21,7 @@ import ( cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) // NUMANodeInfo is a map from NUMANode ID to a list of CPU IDs associated with diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpointv1.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpointv1.go index 9014ebfc1fdc..d26b972f7be2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpointv1.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint/checkpointv1.go @@ -18,11 +18,11 @@ package checkpoint import ( "encoding/json" + "fmt" "hash/fnv" "strings" - "github.com/davecgh/go-spew/spew" - + "k8s.io/apimachinery/pkg/util/dump" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" @@ -48,18 +48,11 @@ type checkpointDataV1 struct { // We need this special code path to be able to correctly validate the checksum k8s 1.19 wrote. 
// credits to https://github.com/kubernetes/kubernetes/pull/102717/commits/353f93895118d2ffa2d59a29a1fbc225160ea1d6 func (cp checkpointDataV1) checksum() checksum.Checksum { - printer := spew.ConfigState{ - Indent: " ", - SortKeys: true, - DisableMethods: true, - SpewKeys: true, - } - - object := printer.Sprintf("%#v", cp) + object := dump.ForHash(cp) object = strings.Replace(object, "checkpointDataV1", "checkpointData", 1) object = strings.Replace(object, "PodDevicesEntryV1", "PodDevicesEntry", -1) hash := fnv.New32a() - printer.Fprintf(hash, "%v", object) + fmt.Fprintf(hash, "%v", object) return checksum.Checksum(hash.Sum32()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go index 8cb57aa8190f..7499de4460f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go @@ -544,15 +544,29 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", string(podUID), contName, resource, devices.Len(), required) } } + + klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName) + healthyDevices, hasRegistered := m.healthyDevices[resource] + + // Check if resource registered with devicemanager + if !hasRegistered { + return nil, fmt.Errorf("cannot allocate unregistered device %s", resource) + } + + // Check if registered resource has healthy devices + if healthyDevices.Len() == 0 { + return nil, fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resource) + } + + // Check if all the previously allocated devices are healthy + if !healthyDevices.IsSuperset(devices) { + return nil, fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resource) + } + if needed == 0 { // No change, no work. return nil, nil } - klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName) - // Check if resource registered with devicemanager - if _, ok := m.healthyDevices[resource]; !ok { - return nil, fmt.Errorf("can't allocate unregistered device %s", resource) - } // Declare the list of allocated devices. // This will be populated and returned below. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go index ea171f0b7b60..4eca61103116 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go @@ -66,112 +66,107 @@ func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string) ( // for each new resource requirement, process their responses and update the cached // containerResources on success. func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { - // Process resources for each resource claim referenced by container - for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) 
{ - for range container.Resources.Claims { - for i := range pod.Spec.ResourceClaims { - claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i]) - klog.V(3).InfoS("Processing resource", "claim", claimName, "pod", pod.Name) - - // Resource is already prepared, add pod UID to it - if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil { - // We delay checkpointing of this change until this call - // returns successfully. It is OK to do this because we - // will only return successfully from this call if the - // checkpoint has succeeded. That means if the kubelet is - // ever restarted before this checkpoint succeeds, the pod - // whose resources are being prepared would never have - // started, so it's OK (actually correct) to not include it - // in the cache. - claimInfo.addPodReference(pod.UID) - continue - } + for i := range pod.Spec.ResourceClaims { + claimName := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i]) + klog.V(3).InfoS("Processing resource", "claim", claimName, "pod", pod.Name) + + // Resource is already prepared, add pod UID to it + if claimInfo := m.cache.get(claimName, pod.Namespace); claimInfo != nil { + // We delay checkpointing of this change until this call + // returns successfully. It is OK to do this because we + // will only return successfully from this call if the + // checkpoint has succeeded. That means if the kubelet is + // ever restarted before this checkpoint succeeds, the pod + // whose resources are being prepared would never have + // started, so it's OK (actually correct) to not include it + // in the cache. + claimInfo.addPodReference(pod.UID) + continue + } - // Query claim object from the API server - resourceClaim, err := m.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Get( - context.TODO(), - claimName, - metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to fetch ResourceClaim %s referenced by pod %s: %+v", claimName, pod.Name, err) - } + // Query claim object from the API server + resourceClaim, err := m.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Get( + context.TODO(), + claimName, + metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to fetch ResourceClaim %s referenced by pod %s: %+v", claimName, pod.Name, err) + } - // Check if pod is in the ReservedFor for the claim - if !resourceclaim.IsReservedForPod(pod, resourceClaim) { - return fmt.Errorf("pod %s(%s) is not allowed to use resource claim %s(%s)", - pod.Name, pod.UID, claimName, resourceClaim.UID) - } + // Check if pod is in the ReservedFor for the claim + if !resourceclaim.IsReservedForPod(pod, resourceClaim) { + return fmt.Errorf("pod %s(%s) is not allowed to use resource claim %s(%s)", + pod.Name, pod.UID, claimName, resourceClaim.UID) + } - // Grab the allocation.resourceHandles. If there are no - // allocation.resourceHandles, create a single resourceHandle with no - // content. This will trigger processing of this claim by a single - // kubelet plugin whose name matches resourceClaim.Status.DriverName. - resourceHandles := resourceClaim.Status.Allocation.ResourceHandles - if len(resourceHandles) == 0 { - resourceHandles = make([]resourcev1alpha2.ResourceHandle, 1) - } + // Grab the allocation.resourceHandles. If there are no + // allocation.resourceHandles, create a single resourceHandle with no + // content. This will trigger processing of this claim by a single + // kubelet plugin whose name matches resourceClaim.Status.DriverName. 
+ resourceHandles := resourceClaim.Status.Allocation.ResourceHandles + if len(resourceHandles) == 0 { + resourceHandles = make([]resourcev1alpha2.ResourceHandle, 1) + } - // Create a claimInfo object to store the relevant claim info. - claimInfo := newClaimInfo( - resourceClaim.Status.DriverName, - resourceClaim.Spec.ResourceClassName, - resourceClaim.UID, - resourceClaim.Name, - resourceClaim.Namespace, - sets.New(string(pod.UID)), - ) - - // Walk through each resourceHandle - for _, resourceHandle := range resourceHandles { - // If no DriverName is provided in the resourceHandle, we - // use the DriverName from the status - pluginName := resourceHandle.DriverName - if pluginName == "" { - pluginName = resourceClaim.Status.DriverName - } - - // Call NodePrepareResource RPC for each resourceHandle - client, err := dra.NewDRAPluginClient(pluginName) - if err != nil { - return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", pluginName, err) - } - response, err := client.NodePrepareResource( - context.Background(), - resourceClaim.Namespace, - resourceClaim.UID, - resourceClaim.Name, - resourceHandle.Data) - if err != nil { - return fmt.Errorf("NodePrepareResource failed, claim UID: %s, claim name: %s, resource handle: %s, err: %+v", - resourceClaim.UID, resourceClaim.Name, resourceHandle.Data, err) - } - klog.V(3).InfoS("NodePrepareResource succeeded", "pluginName", pluginName, "response", response) - - // Add the CDI Devices returned by NodePrepareResource to - // the claimInfo object. - err = claimInfo.addCDIDevices(pluginName, response.CdiDevices) - if err != nil { - return fmt.Errorf("failed to add CDIDevices to claimInfo %+v: %+v", claimInfo, err) - } - - // TODO: We (re)add the claimInfo object to the cache and - // sync it to the checkpoint *after* the - // NodePrepareResource call has completed. This will cause - // issues if the kubelet gets restarted between - // NodePrepareResource and syncToCheckpoint. It will result - // in not calling NodeUnprepareResource for this claim - // because no claimInfo will be synced back to the cache - // for it after the restart. We need to resolve this issue - // before moving to beta. - m.cache.add(claimInfo) - - // Checkpoint to reduce redundant calls to - // NodePrepareResource() after a kubelet restart. - err = m.cache.syncToCheckpoint() - if err != nil { - return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err) - } - } + // Create a claimInfo object to store the relevant claim info. 
+ claimInfo := newClaimInfo( + resourceClaim.Status.DriverName, + resourceClaim.Spec.ResourceClassName, + resourceClaim.UID, + resourceClaim.Name, + resourceClaim.Namespace, + sets.New(string(pod.UID)), + ) + + // Walk through each resourceHandle + for _, resourceHandle := range resourceHandles { + // If no DriverName is provided in the resourceHandle, we + // use the DriverName from the status + pluginName := resourceHandle.DriverName + if pluginName == "" { + pluginName = resourceClaim.Status.DriverName + } + + // Call NodePrepareResource RPC for each resourceHandle + client, err := dra.NewDRAPluginClient(pluginName) + if err != nil { + return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", pluginName, err) + } + response, err := client.NodePrepareResource( + context.Background(), + resourceClaim.Namespace, + resourceClaim.UID, + resourceClaim.Name, + resourceHandle.Data) + if err != nil { + return fmt.Errorf("NodePrepareResource failed, claim UID: %s, claim name: %s, resource handle: %s, err: %+v", + resourceClaim.UID, resourceClaim.Name, resourceHandle.Data, err) + } + klog.V(3).InfoS("NodePrepareResource succeeded", "pluginName", pluginName, "response", response) + + // Add the CDI Devices returned by NodePrepareResource to + // the claimInfo object. + err = claimInfo.addCDIDevices(pluginName, response.CdiDevices) + if err != nil { + return fmt.Errorf("failed to add CDIDevices to claimInfo %+v: %+v", claimInfo, err) + } + + // TODO: We (re)add the claimInfo object to the cache and + // sync it to the checkpoint *after* the + // NodePrepareResource call has completed. This will cause + // issues if the kubelet gets restarted between + // NodePrepareResource and syncToCheckpoint. It will result + // in not calling NodeUnprepareResource for this claim + // because no claimInfo will be synced back to the cache + // for it after the restart. We need to resolve this issue + // before moving to beta. + m.cache.add(claimInfo) + + // Checkpoint to reduce redundant calls to + // NodePrepareResource() after a kubelet restart. + err = m.cache.syncToCheckpoint() + if err != nil { + return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go index 8a154f272c8e..7fa8f44ef736 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -480,12 +480,6 @@ type RunContainerOptions struct { ReadOnly bool // hostname for pod containers Hostname string - // EnableHostUserNamespace sets userns=host when users request host namespaces (pid, ipc, net), - // are using non-namespaced capabilities (mknod, sys_time, sys_module), the pod contains a privileged container, - // or using host path volumes. - // This should only be enabled when the container runtime is performing user remapping AND if the - // experimental behavior is desired. - EnableHostUserNamespace bool } // VolumeInfo contains information about the volume. 
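Most of the dra/manager.go hunk above is re-indentation rather than new logic: the claim-preparation body, formerly nested inside a per-container, per-claim-reference double loop, now runs exactly once per entry in `pod.Spec.ResourceClaims`. A toy sketch of the shape change, with stand-in types rather than the kubelet's own:

```go
package main

import "fmt"

type Claim struct{ Name string }

type Pod struct {
	ResourceClaims []Claim
}

// prepare mirrors the reshaped PrepareResources: each pod-level claim is
// visited exactly once. The old loop walked every container's claim
// references and re-processed the full claim list for each of them,
// repeating the cache lookups and NodePrepareResource calls.
func prepare(pod *Pod) {
	for i := range pod.ResourceClaims {
		fmt.Println("preparing claim", pod.ResourceClaims[i].Name)
	}
}

func main() {
	prepare(&Pod{ResourceClaims: []Claim{{Name: "gpu-claim"}, {Name: "nic-claim"}}})
}
```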
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go
index 3b89f5c65608..bf82205303cf 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go
@@ -115,38 +115,6 @@ func (f *FakeRuntimeCache) ForceUpdateIfOlder(context.Context, time.Time) error
 	return nil
 }

-// ClearCalls resets the FakeRuntime to the initial state.
-func (f *FakeRuntime) ClearCalls() {
-	f.Lock()
-	defer f.Unlock()
-
-	f.CalledFunctions = []string{}
-	f.PodList = []*FakePod{}
-	f.AllPodList = []*FakePod{}
-	f.APIPodStatus = v1.PodStatus{}
-	f.StartedPods = []string{}
-	f.KilledPods = []string{}
-	f.StartedContainers = []string{}
-	f.KilledContainers = []string{}
-	f.RuntimeStatus = nil
-	f.VersionInfo = ""
-	f.RuntimeType = ""
-	f.Err = nil
-	f.InspectErr = nil
-	f.StatusErr = nil
-	f.BlockImagePulls = false
-	if f.imagePullTokenBucket != nil {
-		for {
-			select {
-			case f.imagePullTokenBucket <- true:
-			default:
-				f.imagePullTokenBucket = nil
-				return
-			}
-		}
-	}
-}
-
 // UpdatePodCIDR fulfills the cri interface.
 func (f *FakeRuntime) UpdatePodCIDR(_ context.Context, c string) error {
 	return nil
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go
index 1deff550fd8c..f053345a06c1 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cri/remote/remote_image.go
@@ -25,7 +25,9 @@ import (
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"go.opentelemetry.io/otel/trace"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/status"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	tracing "k8s.io/component-base/tracing"
 	"k8s.io/klog/v2"
@@ -165,6 +167,17 @@ func (r *remoteImageService) pullImageV1(ctx context.Context, image *runtimeapi.
 	})
 	if err != nil {
 		klog.ErrorS(err, "PullImage from image service failed", "image", image.Image)
+
+		// We can strip the code from unknown status errors since it adds no value
+		// and makes the messages easier to read in the logs/events.
+		//
+		// It also ensures that checking custom error types from pkg/kubelet/images/types.go
+		// works in `imageManager.EnsureImageExists` (pkg/kubelet/images/image_manager.go).
+ statusErr, ok := status.FromError(err) + if ok && statusErr.Code() == codes.Unknown { + return "", errors.New(statusErr.Message()) + } + return "", err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index 2c23d4241d75..e47b37a0d05e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -66,8 +66,6 @@ type managerImpl struct { config Config // the function to invoke to kill a pod killPodFunc KillPodFunc - // the function to get the mirror pod by a given static pod - mirrorPodFunc MirrorPodFunc // the interface that knows how to do image gc imageGC ImageGC // the interface that knows how to do container gc @@ -112,7 +110,6 @@ func NewManager( summaryProvider stats.SummaryProvider, config Config, killPodFunc KillPodFunc, - mirrorPodFunc MirrorPodFunc, imageGC ImageGC, containerGC ContainerGC, recorder record.EventRecorder, @@ -123,7 +120,6 @@ func NewManager( manager := &managerImpl{ clock: clock, killPodFunc: killPodFunc, - mirrorPodFunc: mirrorPodFunc, imageGC: imageGC, containerGC: containerGC, config: config, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go index 8798f023e954..6090b3732a53 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go @@ -19,9 +19,9 @@ package images import ( "context" "fmt" + "strings" "time" - dockerref "github.com/docker/distribution/reference" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -29,8 +29,10 @@ import ( "k8s.io/klog/v2" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" + crierrors "k8s.io/cri-api/pkg/errors" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/pkg/util/parsers" ) type ImagePodPullingTimeRecorder interface { @@ -157,35 +159,63 @@ func (m *imageManager) EnsureImageExists(ctx context.Context, pod *v1.Pod, conta if imagePullResult.err != nil { m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning) m.backOff.Next(backOffKey, m.backOff.Clock.Now()) - if imagePullResult.err == ErrRegistryUnavailable { - msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) - return "", msg, imagePullResult.err - } - return "", imagePullResult.err.Error(), ErrImagePull + msg, err := evalCRIPullErr(container, imagePullResult.err) + return "", msg, err } m.podPullingTimeRecorder.RecordImageFinishedPulling(pod.UID) - m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q in %v (%v including waiting)", container.Image, imagePullResult.pullDuration, time.Since(startTime)), klog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q in %v (%v including waiting)", + container.Image, imagePullResult.pullDuration.Truncate(time.Millisecond), time.Since(startTime).Truncate(time.Millisecond)), klog.Info) m.backOff.GC() return imagePullResult.imageRef, "", nil } +func 
evalCRIPullErr(container *v1.Container, err error) (errMsg string, errRes error) { + // Error assertions via errors.Is is not supported by gRPC (remote runtime) errors right now. + // See https://github.com/grpc/grpc-go/issues/3616 + if strings.HasPrefix(err.Error(), crierrors.ErrRegistryUnavailable.Error()) { + errMsg = fmt.Sprintf( + "image pull failed for %s because the registry is unavailable%s", + container.Image, + // Trim the error name from the message to convert errors like: + // "RegistryUnavailable: a more detailed explanation" to: + // "...because the registry is unavailable: a more detailed explanation" + strings.TrimPrefix(err.Error(), crierrors.ErrRegistryUnavailable.Error()), + ) + return errMsg, crierrors.ErrRegistryUnavailable + } + + if strings.HasPrefix(err.Error(), crierrors.ErrSignatureValidationFailed.Error()) { + errMsg = fmt.Sprintf( + "image pull failed for %s because the signature validation failed%s", + container.Image, + // Trim the error name from the message to convert errors like: + // "SignatureValidationFailed: a more detailed explanation" to: + // "...because the signature validation failed: a more detailed explanation" + strings.TrimPrefix(err.Error(), crierrors.ErrSignatureValidationFailed.Error()), + ) + return errMsg, crierrors.ErrSignatureValidationFailed + } + + // Fallback for no specific error + return err.Error(), ErrImagePull +} + // applyDefaultImageTag parses a docker image string, if it doesn't contain any tag or digest, // a default tag will be applied. func applyDefaultImageTag(image string) (string, error) { - named, err := dockerref.ParseNormalizedNamed(image) + _, tag, digest, err := parsers.ParseImageName(image) if err != nil { - return "", fmt.Errorf("couldn't parse image reference %q: %v", image, err) + return "", err } - _, isTagged := named.(dockerref.Tagged) - _, isDigested := named.(dockerref.Digested) - if !isTagged && !isDigested { + // we just concatenate the image name with the default tag here instead + if len(digest) == 0 && len(tag) > 0 && !strings.HasSuffix(image, ":"+tag) { // we just concatenate the image name with the default tag here instead // of using dockerref.WithTag(named, ...) because that would cause the // image to be fully qualified as docker.io/$name if it's a short name // (e.g. just busybox). We don't want that to happen to keep the CRI // agnostic wrt image names and default hostnames. - image = image + ":latest" + image = image + ":" + tag } return image, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go index 3b0397faad49..52342b28ec15 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go @@ -37,9 +37,6 @@ var ( // ErrImageNeverPull - Required Image is absent on host and PullPolicy is NeverPullImage ErrImageNeverPull = errors.New("ErrImageNeverPull") - // ErrRegistryUnavailable - Get http error when pulling image from registry - ErrRegistryUnavailable = errors.New("RegistryUnavailable") - // ErrInvalidImageName - Unable to parse the image name. 
ErrInvalidImageName = errors.New("InvalidImageName") ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 3afbd6b21e4d..709e35015fb2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -32,6 +32,7 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" + "github.com/google/go-cmp/cmp" libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/selinux/go-selinux" "go.opentelemetry.io/otel/attribute" @@ -47,7 +48,6 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/diff" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -517,56 +517,55 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, tracer := kubeDeps.TracerProvider.Tracer(instrumentationScope) klet := &Kubelet{ - hostname: hostname, - hostnameOverridden: hostnameOverridden, - nodeName: nodeName, - kubeClient: kubeDeps.KubeClient, - heartbeatClient: kubeDeps.HeartbeatClient, - onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure, - rootDirectory: filepath.Clean(rootDirectory), - resyncInterval: kubeCfg.SyncFrequency.Duration, - sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources), - registerNode: registerNode, - registerWithTaints: registerWithTaints, - registerSchedulable: registerSchedulable, - dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, nodeIPs, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), - serviceLister: serviceLister, - serviceHasSynced: serviceHasSynced, - nodeLister: nodeLister, - nodeHasSynced: nodeHasSynced, - streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, - recorder: kubeDeps.Recorder, - cadvisor: kubeDeps.CAdvisorInterface, - cloud: kubeDeps.Cloud, - externalCloudProvider: cloudprovider.IsExternal(cloudProvider), - providerID: providerID, - nodeRef: nodeRef, - nodeLabels: nodeLabels, - nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, - nodeStatusReportFrequency: kubeCfg.NodeStatusReportFrequency.Duration, - os: kubeDeps.OSInterface, - oomWatcher: oomWatcher, - cgroupsPerQOS: kubeCfg.CgroupsPerQOS, - cgroupRoot: kubeCfg.CgroupRoot, - mounter: kubeDeps.Mounter, - hostutil: kubeDeps.HostUtil, - subpather: kubeDeps.Subpather, - maxPods: int(kubeCfg.MaxPods), - podsPerCore: int(kubeCfg.PodsPerCore), - syncLoopMonitor: atomic.Value{}, - daemonEndpoints: daemonEndpoints, - containerManager: kubeDeps.ContainerManager, - nodeIPs: nodeIPs, - nodeIPValidator: validateNodeIP, - clock: clock.RealClock{}, - enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach, - makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains, - iptablesMasqueradeBit: int(kubeCfg.IPTablesMasqueradeBit), - iptablesDropBit: int(kubeCfg.IPTablesDropBit), - experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate), - keepTerminatedPodVolumes: keepTerminatedPodVolumes, - nodeStatusMaxImages: nodeStatusMaxImages, - tracer: tracer, + hostname: hostname, + hostnameOverridden: hostnameOverridden, + nodeName: nodeName, + kubeClient: kubeDeps.KubeClient, + heartbeatClient: kubeDeps.HeartbeatClient, + 
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure, + rootDirectory: filepath.Clean(rootDirectory), + resyncInterval: kubeCfg.SyncFrequency.Duration, + sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources), + registerNode: registerNode, + registerWithTaints: registerWithTaints, + registerSchedulable: registerSchedulable, + dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, nodeIPs, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), + serviceLister: serviceLister, + serviceHasSynced: serviceHasSynced, + nodeLister: nodeLister, + nodeHasSynced: nodeHasSynced, + streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, + recorder: kubeDeps.Recorder, + cadvisor: kubeDeps.CAdvisorInterface, + cloud: kubeDeps.Cloud, + externalCloudProvider: cloudprovider.IsExternal(cloudProvider), + providerID: providerID, + nodeRef: nodeRef, + nodeLabels: nodeLabels, + nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, + nodeStatusReportFrequency: kubeCfg.NodeStatusReportFrequency.Duration, + os: kubeDeps.OSInterface, + oomWatcher: oomWatcher, + cgroupsPerQOS: kubeCfg.CgroupsPerQOS, + cgroupRoot: kubeCfg.CgroupRoot, + mounter: kubeDeps.Mounter, + hostutil: kubeDeps.HostUtil, + subpather: kubeDeps.Subpather, + maxPods: int(kubeCfg.MaxPods), + podsPerCore: int(kubeCfg.PodsPerCore), + syncLoopMonitor: atomic.Value{}, + daemonEndpoints: daemonEndpoints, + containerManager: kubeDeps.ContainerManager, + nodeIPs: nodeIPs, + nodeIPValidator: validateNodeIP, + clock: clock.RealClock{}, + enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach, + makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains, + iptablesMasqueradeBit: int(kubeCfg.IPTablesMasqueradeBit), + iptablesDropBit: int(kubeCfg.IPTablesDropBit), + keepTerminatedPodVolumes: keepTerminatedPodVolumes, + nodeStatusMaxImages: nodeStatusMaxImages, + tracer: tracer, } if klet.cloud != nil { @@ -596,10 +595,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.configMapManager = configMapManager } - if klet.experimentalHostUserNamespaceDefaulting { - klog.InfoS("Experimental host user namespace defaulting is enabled") - } - machineInfo, err := klet.cadvisor.MachineInfo() if err != nil { return nil, err @@ -616,9 +611,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.startupManager = proberesults.NewManager() klet.podCache = kubecontainer.NewCache() - // podManager is also responsible for keeping secretManager and configMapManager contents up-to-date. 
- mirrorPodClient := kubepod.NewBasicMirrorClient(klet.kubeClient, string(nodeName), nodeLister) - klet.podManager = kubepod.NewBasicPodManager(mirrorPodClient) + klet.mirrorPodClient = kubepod.NewBasicMirrorClient(klet.kubeClient, string(nodeName), nodeLister) + klet.podManager = kubepod.NewBasicPodManager() klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet, kubeDeps.PodStartupLatencyTracker, klet.getRootDir()) @@ -842,7 +836,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // setup eviction manager evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, - killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock, kubeCfg.LocalStorageCapacityIsolation) + killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock, kubeCfg.LocalStorageCapacityIsolation) klet.evictionManager = evictionManager klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler) @@ -941,7 +935,11 @@ type Kubelet struct { runtimeCache kubecontainer.RuntimeCache kubeClient clientset.Interface heartbeatClient clientset.Interface - rootDirectory string + // mirrorPodClient is used to create and delete mirror pods in the API for static + // pods. + mirrorPodClient kubepod.MirrorClient + + rootDirectory string lastObservedNodeAddressesMux sync.RWMutex lastObservedNodeAddresses []v1.NodeAddress @@ -949,9 +947,90 @@ type Kubelet struct { // onRepeatedHeartbeatFailure is called when a heartbeat operation fails more than once. optional. onRepeatedHeartbeatFailure func() - // podWorkers handle syncing Pods in response to events. + // podManager stores the desired set of admitted pods and mirror pods that the kubelet should be + // running. The actual set of running pods is stored on the podWorkers. The manager is populated + // by the kubelet config loops which abstracts receiving configuration from many different sources + // (api for regular pods, local filesystem or http for static pods). The manager may be consulted + // by other components that need to see the set of desired pods. Note that not all desired pods are + // running, and not all running pods are in the podManager - for instance, force deleting a pod + // from the apiserver will remove it from the podManager, but the pod may still be terminating and + // tracked by the podWorkers. Components that need to know the actual consumed resources of the + // node or are driven by podWorkers and the sync*Pod methods (status, volume, stats) should also + // consult the podWorkers when reconciling. + // + // TODO: review all kubelet components that need the actual set of pods (vs the desired set) + // and update them to use podWorkers instead of podManager. This may introduce latency in some + // methods, but avoids race conditions and correctly accounts for terminating pods that have + // been force deleted or static pods that have been updated. + // https://github.com/kubernetes/kubernetes/issues/116970 + podManager kubepod.Manager + + // podWorkers is responsible for driving the lifecycle state machine of each pod. The worker is + // notified of config changes, updates, periodic reconciliation, container runtime updates, and + // evictions of all desired pods and will invoke reconciliation methods per pod in separate + // goroutines. 
The podWorkers are authoritative in the kubelet for what pods are actually being + // run and their current state: + // + // * syncing: pod should be running (syncPod) + // * terminating: pod should be stopped (syncTerminatingPod) + // * terminated: pod should have all resources cleaned up (syncTerminatedPod) + // + // and invoke the handler methods that correspond to each state. Components within the + // kubelet that need to know the phase of the pod in order to correctly set up or tear down + // resources must consult the podWorkers. + // + // Once a pod has been accepted by the pod workers, no other pod with that same UID (and + // name+namespace, for static pods) will be started until the first pod has fully terminated + // and been cleaned up by SyncKnownPods. This means a pod may be desired (in API), admitted + // (in pod manager), and requested (by invoking UpdatePod) but not start for an arbitrarily + // long interval because a prior pod is still terminating. + // + // As an event-driven (by UpdatePod) controller, the podWorkers must periodically be resynced + // by the kubelet invoking SyncKnownPods with the desired state (admitted pods in podManager). + // Since the podManager may be unaware of some running pods due to force deletion, the + // podWorkers are responsible for triggering a sync of pods that are no longer desired but + // must still run to completion. podWorkers PodWorkers + // evictionManager observes the state of the node for situations that could impact node stability + // and evicts pods (sets to phase Failed with reason Evicted) to reduce resource pressure. The + // eviction manager acts on the actual state of the node and considers the podWorker to be + // authoritative. + evictionManager eviction.Manager + + // probeManager tracks the set of running pods and ensures any user-defined periodic checks are + // run to introspect the state of each pod. The probe manager acts on the actual state of the node + // and is notified of pods by the podWorker. The probe manager is the authoritative source of the + // most recent probe status and is responsible for notifying the status manager, which + // synthesizes them into the overall pod status. + probeManager prober.Manager + + // secretManager caches the set of secrets used by running pods on this node. The podWorkers + // notify the secretManager when pods are started and terminated, and the secretManager must + // then keep the needed secrets up-to-date as they change. + secretManager secret.Manager + + // configMapManager caches the set of config maps used by running pods on this node. The + // podWorkers notify the configMapManager when pods are started and terminated, and the + // configMapManager must then keep the needed config maps up-to-date as they change. + configMapManager configmap.Manager + + // volumeManager observes the set of running pods and is responsible for attaching, mounting, + // unmounting, and detaching as those pods move through their lifecycle. It periodically + // synchronizes the set of known volumes to the set of actually desired volumes and cleans up + // any orphaned volumes. The volume manager considers the podWorker to be authoritative for + // which pods are running. + volumeManager volumemanager.VolumeManager + + // statusManager receives updated pod status updates from the podWorker and updates the API + // status of those pods to match. 
The statusManager is authoritative for the synthesized + // status of the pod from the kubelet's perspective (other components own the individual + // elements of status) and should be consulted by components in preference to assembling + // that status themselves. Note that the status manager is downstream of the pod worker + // and components that need to check whether a pod is still running should instead directly + // consult the pod worker. + statusManager status.Manager + // resyncInterval is the interval between periodic full reconciliations of // pods on this node. resyncInterval time.Duration @@ -959,13 +1038,6 @@ type Kubelet struct { // sourcesReady records the sources seen by the kubelet, it is thread-safe. sourcesReady config.SourcesReady - // podManager is a facade that abstracts away the various sources of pods - // this Kubelet services. - podManager kubepod.Manager - - // Needed to observe and respond to situations that could impact node stability - evictionManager eviction.Manager - // Optional, defaults to /logs/ from /var/log logServer http.Handler // Optional, defaults to simple Docker implementation @@ -1006,8 +1078,6 @@ type Kubelet struct { // Volume plugins. volumePluginMgr *volume.VolumePluginMgr - // Handles container probing. - probeManager prober.Manager // Manages container health check results. livenessManager proberesults.Manager readinessManager proberesults.Manager @@ -1029,12 +1099,6 @@ type Kubelet struct { // Manager for container logs. containerLogManager logs.ContainerLogManager - // Secret manager. - secretManager secret.Manager - - // ConfigMap manager. - configMapManager configmap.Manager - // Cached MachineInfo returned by cadvisor. machineInfoLock sync.RWMutex machineInfo *cadvisorapi.MachineInfo @@ -1042,14 +1106,6 @@ type Kubelet struct { // Handles certificate rotations. serverCertificateManager certificate.Manager - // Syncs pods statuses with apiserver; also used as a cache of statuses. - statusManager status.Manager - - // VolumeManager runs a set of asynchronous loops that figure out which - // volumes need to be attached/mounted/unmounted/detached based on the pods - // scheduled on this node and makes it so. - volumeManager volumemanager.VolumeManager - // Cloud provider interface. cloud cloudprovider.Interface // Handles requests to cloud provider with timeout @@ -1115,10 +1171,12 @@ type Kubelet struct { // nodeLeaseController claims and renews the node lease for this Kubelet nodeLeaseController lease.Controller - // Generates pod events. + // pleg observes the state of the container runtime and notifies the kubelet of changes to containers, which + // notifies the podWorkers to reconcile the state of the pod (for instance, if a container dies and needs to + // be restarted). pleg pleg.PodLifecycleEventGenerator - // Evented PLEG + // eventedPleg supplements the pleg to deliver edge-driven container changes with low-latency. eventedPleg pleg.PodLifecycleEventGenerator // Store kubecontainer.PodStatus for all pods. @@ -1227,13 +1285,6 @@ type Kubelet struct { // The AppArmor validator for checking whether AppArmor is supported. appArmorValidator apparmor.Validator - // experimentalHostUserNamespaceDefaulting sets userns=true when users request host namespaces (pid, ipc, net), - // are using non-namespaced capabilities (mknod, sys_time, sys_module), the pod contains a privileged container, - // or using host path volumes. 
- // This should only be enabled when the container runtime is performing user remapping AND if the - // experimental behavior is desired. - experimentalHostUserNamespaceDefaulting bool - // StatsProvider provides the node and the container stats. StatsProvider *stats.Provider @@ -1653,10 +1704,8 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // This operation writes all events that are dispatched in order to provide // the most accurate information possible about an error situation to aid debugging. // Callers should not write an event if this operation returns an error. -func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) { - // TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker. - // Currently, using that context causes test failures. - ctx, otelSpan := kl.tracer.Start(context.TODO(), "syncPod", trace.WithAttributes( +func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) { + ctx, otelSpan := kl.tracer.Start(ctx, "syncPod", trace.WithAttributes( attribute.String("k8s.pod.uid", string(pod.UID)), attribute.String("k8s.pod", klog.KObj(pod).String()), attribute.String("k8s.pod.name", pod.Name), @@ -1751,13 +1800,15 @@ func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, var syncErr error p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) if err := kl.killPod(ctx, pod, p, nil); err != nil { - kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) - syncErr = fmt.Errorf("error killing pod: %v", err) - utilruntime.HandleError(syncErr) + if !wait.Interrupted(err) { + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) + syncErr = fmt.Errorf("error killing pod: %w", err) + utilruntime.HandleError(syncErr) + } } else { // There was no error killing the pod, but the pod cannot be run. // Return an error to signal that the sync loop should back off. - syncErr = fmt.Errorf("pod cannot be run: %s", runnable.Message) + syncErr = fmt.Errorf("pod cannot be run: %v", runnable.Message) } return false, syncErr } @@ -1803,6 +1854,9 @@ func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, if !pcm.Exists(pod) && !firstSync { p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) if err := kl.killPod(ctx, pod, p, nil); err == nil { + if wait.Interrupted(err) { + return false, err + } podKilled = true } else { klog.ErrorS(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus) @@ -1832,13 +1886,13 @@ func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, if kubetypes.IsStaticPod(pod) { deleted := false if mirrorPod != nil { - if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) { + if mirrorPod.DeletionTimestamp != nil || !kubepod.IsMirrorPodOf(mirrorPod, pod) { // The mirror pod is semantically different from the static pod. Remove // it. The mirror pod will get recreated later. 
klog.InfoS("Trying to delete pod", "pod", klog.KObj(pod), "podUID", mirrorPod.ObjectMeta.UID) podFullName := kubecontainer.GetPodFullName(pod) var err error - deleted, err = kl.podManager.DeleteMirrorPod(podFullName, &mirrorPod.ObjectMeta.UID) + deleted, err = kl.mirrorPodClient.DeleteMirrorPod(podFullName, &mirrorPod.ObjectMeta.UID) if deleted { klog.InfoS("Deleted mirror pod because it is outdated", "pod", klog.KObj(mirrorPod)) } else if err != nil { @@ -1852,7 +1906,7 @@ func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, klog.V(4).InfoS("No need to create a mirror pod, since node has been removed from the cluster", "node", klog.KRef("", string(kl.nodeName))) } else { klog.V(4).InfoS("Creating a mirror pod for static pod", "pod", klog.KObj(pod)) - if err := kl.podManager.CreateMirrorPod(pod); err != nil { + if err := kl.mirrorPodClient.CreateMirrorPod(pod); err != nil { klog.ErrorS(err, "Failed creating a mirror pod for", "pod", klog.KObj(pod)) } } @@ -1866,15 +1920,13 @@ func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, return false, err } - // Volume manager will not mount volumes for terminating pods - // TODO: once context cancellation is added this check can be removed - if !kl.podWorkers.IsPodTerminationRequested(pod.UID) { - // Wait for volumes to attach/mount - if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil { + // Wait for volumes to attach/mount + if err := kl.volumeManager.WaitForAttachAndMount(ctx, pod); err != nil { + if !wait.Interrupted(err) { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to attach or mount volumes: %v", err) klog.ErrorS(err, "Unable to attach or mount volumes for pod; skipping pod", "pod", klog.KObj(pod)) - return false, err } + return false, err } // Fetch the pull secrets for the pod @@ -1893,8 +1945,13 @@ func (kl *Kubelet) SyncPod(_ context.Context, updateType kubetypes.SyncPodType, } } + // TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker. + // Currently, using that context causes test failures. To remove this todoCtx, any wait.Interrupted + // errors need to be filtered from result and bypass the reasonCache - cancelling the context for + // SyncPod is a known and deliberate error, not a generic error. + todoCtx := context.TODO() // Call the container runtime's SyncPod callback - result := kl.containerRuntime.SyncPod(ctx, pod, podStatus, pullSecrets, kl.backOff) + result := kl.containerRuntime.SyncPod(todoCtx, pod, podStatus, pullSecrets, kl.backOff) kl.reasonCache.Update(pod.UID, result) if err := result.Error(); err != nil { // Do not return error if the only failures were pods in backoff @@ -2068,7 +2125,7 @@ func (kl *Kubelet) SyncTerminatingRuntimePod(_ context.Context, runningPod *kube // This typically occurs when a pod is force deleted from configuration (local disk or API) and the // kubelet restarts in the middle of the action. 
func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error { - _, otelSpan := kl.tracer.Start(context.Background(), "syncTerminatedPod", trace.WithAttributes( + ctx, otelSpan := kl.tracer.Start(ctx, "syncTerminatedPod", trace.WithAttributes( attribute.String("k8s.pod.uid", string(pod.UID)), attribute.String("k8s.pod", klog.KObj(pod).String()), attribute.String("k8s.pod.name", pod.Name), @@ -2086,7 +2143,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus // volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied // before syncTerminatedPod is invoked) - if err := kl.volumeManager.WaitForUnmount(pod); err != nil { + if err := kl.volumeManager.WaitForUnmount(ctx, pod); err != nil { return err } klog.V(4).InfoS("Pod termination unmounted volumes", "pod", klog.KObj(pod), "podUID", pod.UID) @@ -2140,6 +2197,15 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus // Get pods which should be resynchronized. Currently, the following pod should be resynchronized: // - pod whose work is ready. // - internal modules that request sync of a pod. +// +// This method does not return orphaned pods (those known only to the pod worker that may have +// been deleted from configuration). Those pods are synced by HandlePodCleanups as a consequence +// of driving the state machine to completion. +// +// TODO: Consider synchronizing all pods which have not recently been acted on to be resilient +// to bugs that might prevent updates from being delivered (such as the previous bug with +// orphaned pods). Instead of asking the work queue for pending work, consider asking the +// PodWorker which pods should be synced. func (kl *Kubelet) getPodsToSync() []*v1.Pod { allPods := kl.podManager.GetPods() podUIDs := kl.workQueue.GetWork() @@ -2448,32 +2514,6 @@ func handleProbeSync(kl *Kubelet, update proberesults.Update, handler SyncHandle handler.HandlePodSyncs([]*v1.Pod{pod}) } -// dispatchWork starts the asynchronous sync of the pod in a pod worker. -// If the pod has completed termination, dispatchWork will perform no action. -func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mirrorPod *v1.Pod, start time.Time) { - // Run the sync in an async worker. - kl.podWorkers.UpdatePod(UpdatePodOptions{ - Pod: pod, - MirrorPod: mirrorPod, - UpdateType: syncType, - StartTime: start, - }) - // Note the number of containers for new pods. - if syncType == kubetypes.SyncPodCreate { - metrics.ContainersPerPodCount.Observe(float64(len(pod.Spec.Containers))) - } -} - -// TODO: handle mirror pods in a separate component (issue #17251) -func (kl *Kubelet) handleMirrorPod(mirrorPod *v1.Pod, start time.Time) { - // Mirror pod ADD/UPDATE/DELETE operations are considered an UPDATE to the - // corresponding static pod. Send update to the pod worker if the static - // pod exists. - if pod, ok := kl.podManager.GetPodByMirrorPod(mirrorPod); ok { - kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start) - } -} - // HandlePodAdditions is the callback in SyncHandler for pods being added from // a config source. func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { @@ -2491,8 +2531,18 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { // the apiserver and no action (other than cleanup) is required. 
kl.podManager.AddPod(pod) - if kubetypes.IsMirrorPod(pod) { - kl.handleMirrorPod(pod, start) + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod) + if wasMirror { + if pod == nil { + klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID) + continue + } + kl.podWorkers.UpdatePod(UpdatePodOptions{ + Pod: pod, + MirrorPod: mirrorPod, + UpdateType: kubetypes.SyncPodUpdate, + StartTime: start, + }) continue } @@ -2536,8 +2586,12 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { } } } - mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) - kl.dispatchWork(pod, kubetypes.SyncPodCreate, mirrorPod, start) + kl.podWorkers.UpdatePod(UpdatePodOptions{ + Pod: pod, + MirrorPod: mirrorPod, + UpdateType: kubetypes.SyncPodCreate, + StartTime: start, + }) } } @@ -2547,12 +2601,21 @@ func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { kl.podManager.UpdatePod(pod) - if kubetypes.IsMirrorPod(pod) { - kl.handleMirrorPod(pod, start) - continue + + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod) + if wasMirror { + if pod == nil { + klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID) + continue + } } - mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) - kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start) + + kl.podWorkers.UpdatePod(UpdatePodOptions{ + Pod: pod, + MirrorPod: mirrorPod, + UpdateType: kubetypes.SyncPodUpdate, + StartTime: start, + }) } } @@ -2561,11 +2624,23 @@ func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) { func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { - kl.podManager.DeletePod(pod) - if kubetypes.IsMirrorPod(pod) { - kl.handleMirrorPod(pod, start) + kl.podManager.RemovePod(pod) + + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod) + if wasMirror { + if pod == nil { + klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID) + continue + } + kl.podWorkers.UpdatePod(UpdatePodOptions{ + Pod: pod, + MirrorPod: mirrorPod, + UpdateType: kubetypes.SyncPodUpdate, + StartTime: start, + }) continue } + // Deletion is allowed to fail because the periodic cleanup routine // will trigger deletion again. if err := kl.deletePod(pod); err != nil { @@ -2575,7 +2650,8 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) { } // HandlePodReconcile is the callback in the SyncHandler interface for pods -// that should be reconciled. +// that should be reconciled. Pods are reconciled when only the status of the +// pod is updated in the API. func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { @@ -2583,13 +2659,37 @@ func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) { // to the pod manager. kl.podManager.UpdatePod(pod) + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod) + if wasMirror { + if pod == nil { + klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID) + continue + } + // Static pods should be reconciled the same way as regular pods + } + + // TODO: reconcile being calculated in the config manager is questionable, and avoiding + // extra syncs may no longer be necessary. 
Reevaluate whether Reconcile and Sync can be + // merged (after resolving the next two TODOs). + // Reconcile Pod "Ready" condition if necessary. Trigger sync pod for reconciliation. + // TODO: this should be unnecessary today - determine what is the cause for this to + // be different than Sync, or if there is a better place for it. For instance, we have + // needsReconcile in kubelet/config, here, and in status_manager. if status.NeedToReconcilePodReadiness(pod) { - mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) - kl.dispatchWork(pod, kubetypes.SyncPodSync, mirrorPod, start) + kl.podWorkers.UpdatePod(UpdatePodOptions{ + Pod: pod, + MirrorPod: mirrorPod, + UpdateType: kubetypes.SyncPodSync, + StartTime: start, + }) } // After an evicted pod is synced, all dead containers in the pod can be removed. + // TODO: this is questionable - status read is async and during eviction we already + // expect to not have some container info. The pod worker knows whether a pod has + // been evicted, so if this is about minimizing the time to react to an eviction we + // can do better. If it's about preserving pod status info we can also do better. if eviction.PodIsEvicted(pod.Status) { if podStatus, err := kl.podCache.Get(pod.UID); err == nil { kl.containerDeletor.deleteContainersInPod("", podStatus, true) @@ -2603,8 +2703,24 @@ func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) { func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) { start := kl.clock.Now() for _, pod := range pods { - mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) - kl.dispatchWork(pod, kubetypes.SyncPodSync, mirrorPod, start) + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod) + if wasMirror { + if pod == nil { + klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID) + continue + } + // Syncing a mirror pod is a programmer error since the intent of sync is to + // batch notify all pending work. We should make it impossible to double sync, + // but for now log a programmer error to prevent accidental introduction. 
+ klog.V(3).InfoS("Programmer error, HandlePodSyncs does not expect to receive mirror pods", "podUID", pod.UID, "mirrorPodUID", mirrorPod.UID) + continue + } + kl.podWorkers.UpdatePod(UpdatePodOptions{ + Pod: pod, + MirrorPod: mirrorPod, + UpdateType: kubetypes.SyncPodSync, + StartTime: start, + }) } } @@ -2614,8 +2730,7 @@ func isPodResizeInProgress(pod *v1.Pod, podStatus *v1.PodStatus) bool { if cs.Resources == nil { continue } - if diff.ObjectDiff(c.Resources.Limits, cs.Resources.Limits) != "" || - diff.ObjectDiff(cs.AllocatedResources, cs.Resources.Requests) != "" { + if !cmp.Equal(c.Resources.Limits, cs.Resources.Limits) || !cmp.Equal(cs.AllocatedResources, cs.Resources.Requests) { return true } } @@ -2685,7 +2800,7 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod) *v1.Pod { klog.V(5).InfoS("ContainerStatus.AllocatedResources length mismatch", "pod", pod.Name, "container", container.Name) break } - if len(diff.ObjectDiff(container.Resources.Requests, containerStatus.AllocatedResources)) > 0 { + if !cmp.Equal(container.Resources.Requests, containerStatus.AllocatedResources) { podResized = true break } @@ -2805,7 +2920,7 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) { // ListenAndServePodResources runs the kubelet podresources grpc service func (kl *Kubelet) ListenAndServePodResources() { - socket, err := util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket) + endpoint, err := util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket) if err != nil { klog.V(2).InfoS("Failed to get local endpoint for PodResources endpoint", "err", err) return @@ -2819,7 +2934,7 @@ func (kl *Kubelet) ListenAndServePodResources() { DynamicResources: kl.containerManager, } - server.ListenAndServePodResources(socket, providers) + server.ListenAndServePodResources(endpoint, providers) } // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go index daee529f1096..d6aa8732fedc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ -181,11 +181,14 @@ func (kl *Kubelet) GetPods() []*v1.Pod { pods := kl.podManager.GetPods() // a kubelet running without apiserver requires an additional // update of the static pod status. See #57106 - for _, p := range pods { + for i, p := range pods { if kubelettypes.IsStaticPod(p) { if status, ok := kl.statusManager.GetPodStatus(p.UID); ok { klog.V(2).InfoS("Pod status updated", "pod", klog.KObj(p), "status", status.Phase) + // do not mutate the cache + p = p.DeepCopy() p.Status = status + pods[i] = p } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go index 8f767817a0f5..9d03adf14997 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go @@ -87,6 +87,12 @@ func (kl *Kubelet) syncIPTablesRules(iptClient utiliptables.Interface) bool { if !iptClient.IsIPv6() { // ipv6 doesn't have this issue // Set up the KUBE-FIREWALL chain and martian packet protection rule. // (See below.) 
+ + // NOTE: kube-proxy (in iptables mode) creates an identical copy of this + // rule. If you want to change this rule in the future, you MUST do so in + // a way that will interoperate correctly with skewed versions of the rule + // created by kube-proxy. + if _, err := iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil { klog.ErrorS(err, "Failed to ensure that filter table KUBE-FIREWALL chain exists") return false @@ -178,8 +184,12 @@ func (kl *Kubelet) syncIPTablesRulesDeprecated(iptClient utiliptables.Interface) } // Set up KUBE-POSTROUTING to unmark and masquerade marked packets - // NB: THIS MUST MATCH the corresponding code in the iptables and ipvs - // modes of kube-proxy + + // NOTE: kube-proxy (in iptables and ipvs modes) creates identical copies of these + // rules. If you want to change these rules in the future, you MUST do so in a way + // that will interoperate correctly with skewed versions of the rules created by + // kube-proxy. + if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain, "-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", masqueradeMark, masqueradeMark), "-j", "RETURN"); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index fbc9104c4281..10e49e49f7a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -29,18 +29,19 @@ import ( "sort" "strings" + "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/component-helpers/storage/ephemeral" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/klog/v2" + "k8s.io/kubelet/pkg/cri/streaming/portforward" + remotecommandserver "k8s.io/kubelet/pkg/cri/streaming/remotecommand" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" podshelper "k8s.io/kubernetes/pkg/apis/core/pods" @@ -50,8 +51,6 @@ import ( "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" - remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand" "k8s.io/kubernetes/pkg/kubelet/envvars" "k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/metrics" @@ -345,7 +344,11 @@ func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName hostsFileContent = managedHostsFileContent(hostIPs, hostName, hostDomainName, hostAliases) } - return os.WriteFile(fileName, hostsFileContent, 0644) + hostsFilePerm := os.FileMode(0644) + if err := os.WriteFile(fileName, hostsFileContent, hostsFilePerm); err != nil { + return err + } + return os.Chmod(fileName, hostsFilePerm) } // nodeHostsFileContent reads the content of node's hosts file. 
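The ensureHostsFile change above pairs os.WriteFile with an explicit os.Chmod on the same permission value. WriteFile's perm argument only takes effect when the file is first created (and is then narrowed by the process umask); rewriting an existing managed hosts file truncates it without touching its mode. A small standalone demonstration of that behavior (the file name is arbitrary):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	name := filepath.Join(os.TempDir(), "hosts-perm-demo")
	defer os.Remove(name)

	// Create the file with a restrictive mode first.
	if err := os.WriteFile(name, []byte("old\n"), 0600); err != nil {
		panic(err)
	}

	// Rewrite it requesting 0644: the perm argument is ignored for an
	// existing file, so the original 0600 mode survives the rewrite.
	if err := os.WriteFile(name, []byte("new\n"), 0644); err != nil {
		panic(err)
	}
	fi, err := os.Stat(name)
	if err != nil {
		panic(err)
	}
	fmt.Println("after WriteFile:", fi.Mode().Perm()) // -rw-------

	// The explicit Chmod is what actually guarantees the desired mode.
	if err := os.Chmod(name, 0644); err != nil {
		panic(err)
	}
	fi, err = os.Stat(name)
	if err != nil {
		panic(err)
	}
	fmt.Println("after Chmod:   ", fi.Mode().Perm()) // -rw-r--r--
}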
@@ -515,11 +518,6 @@ func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, } } - // only do this check if the experimental behavior is enabled, otherwise allow it to default to false - if kl.experimentalHostUserNamespaceDefaulting { - opts.EnableHostUserNamespace = kl.enableHostUserNamespace(ctx, pod) - } - return opts, cleanupAction, nil } @@ -986,23 +984,6 @@ func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Po kl.statusManager.RemoveOrphanedStatuses(podUIDs) } -// deleteOrphanedMirrorPods checks whether pod killer has done with orphaned mirror pod. -// If pod killing is done, podManager.DeleteMirrorPod() is called to delete mirror pod -// from the API server -func (kl *Kubelet) deleteOrphanedMirrorPods() { - mirrorPods := kl.podManager.GetOrphanedMirrorPodNames() - for _, podFullname := range mirrorPods { - if !kl.podWorkers.IsPodForMirrorPodTerminatingByFullName(podFullname) { - _, err := kl.podManager.DeleteMirrorPod(podFullname, nil) - if err != nil { - klog.ErrorS(err, "Encountered error when deleting mirror pod", "podName", podFullname) - } else { - klog.V(3).InfoS("Deleted mirror pod", "podName", podFullname) - } - } - } -} - // HandlePodCleanups performs a series of cleanup work, including terminating // pod workers, killing unwanted pods, and removing orphaned volumes/pod // directories. No config changes are sent to pod workers while this method @@ -1033,15 +1014,7 @@ func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { } } - allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods() - activePods := kl.filterOutInactivePods(allPods) - allRegularPods, allStaticPods := splitPodsByStatic(allPods) - activeRegularPods, activeStaticPods := splitPodsByStatic(activePods) - metrics.DesiredPodCount.WithLabelValues("").Set(float64(len(allRegularPods))) - metrics.DesiredPodCount.WithLabelValues("true").Set(float64(len(allStaticPods))) - metrics.ActivePodCount.WithLabelValues("").Set(float64(len(activeRegularPods))) - metrics.ActivePodCount.WithLabelValues("true").Set(float64(len(activeStaticPods))) - metrics.MirrorPodCount.Set(float64(len(mirrorPods))) + allPods, mirrorPods, orphanedMirrorPodFullnames := kl.podManager.GetPodsAndMirrorPods() // Pod phase progresses monotonically. Once a pod has reached a final state, // it should never leave regardless of the restart policy. The statuses @@ -1137,7 +1110,27 @@ func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { // Remove any orphaned mirror pods (mirror pods are tracked by name via the // pod worker) klog.V(3).InfoS("Clean up orphaned mirror pods") - kl.deleteOrphanedMirrorPods() + for _, podFullname := range orphanedMirrorPodFullnames { + if !kl.podWorkers.IsPodForMirrorPodTerminatingByFullName(podFullname) { + _, err := kl.mirrorPodClient.DeleteMirrorPod(podFullname, nil) + if err != nil { + klog.ErrorS(err, "Encountered error when deleting mirror pod", "podName", podFullname) + } else { + klog.V(3).InfoS("Deleted mirror pod", "podName", podFullname) + } + } + } + + // After pruning pod workers for terminated pods get the list of active pods for + // metrics and to determine restarts. 
+ activePods := kl.filterOutInactivePods(allPods) + allRegularPods, allStaticPods := splitPodsByStatic(allPods) + activeRegularPods, activeStaticPods := splitPodsByStatic(activePods) + metrics.DesiredPodCount.WithLabelValues("").Set(float64(len(allRegularPods))) + metrics.DesiredPodCount.WithLabelValues("true").Set(float64(len(allStaticPods))) + metrics.ActivePodCount.WithLabelValues("").Set(float64(len(activeRegularPods))) + metrics.ActivePodCount.WithLabelValues("true").Set(float64(len(activeStaticPods))) + metrics.MirrorPodCount.Set(float64(len(mirrorPods))) // At this point, the pod worker is aware of which pods are not desired (SyncKnownPods). // We now look through the set of active pods for those that the pod worker is not aware of @@ -1155,10 +1148,14 @@ func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { klog.V(3).InfoS("Pod will be restarted because it is in the desired set and not known to the pod workers (likely due to UID reuse)", "podUID", desiredPod.UID) isStatic := kubetypes.IsStaticPod(desiredPod) - mirrorPod, _ := kl.podManager.GetMirrorPodByPod(desiredPod) + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(desiredPod) + if pod == nil || wasMirror { + klog.V(2).InfoS("Programmer error, restartable pod was a mirror pod but activePods should never contain a mirror pod", "podUID", desiredPod.UID) + continue + } kl.podWorkers.UpdatePod(UpdatePodOptions{ UpdateType: kubetypes.SyncPodCreate, - Pod: desiredPod, + Pod: pod, MirrorPod: mirrorPod, }) @@ -1245,7 +1242,6 @@ func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { // Cleanup any backoff entries. kl.backOff.GC() - return nil } @@ -1351,15 +1347,26 @@ func (kl *Kubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, con return fmt.Errorf("pod %q cannot be found - no logs available", name) } - podUID := pod.UID - if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok { + // TODO: this should be using the podWorker's pod store as authoritative, since + // the mirrorPod might still exist, the pod may have been force deleted but + // is still terminating (users should be able to view logs of force deleted static pods + // based on full name). + var podUID types.UID + pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod) + if wasMirror { + if pod == nil { + return fmt.Errorf("mirror pod %q does not have a corresponding pod", name) + } podUID = mirrorPod.UID + } else { + podUID = pod.UID } + podStatus, found := kl.statusManager.GetPodStatus(podUID) if !found { // If there is no cached status, use the status from the - // apiserver. This is useful if kubelet has recently been - // restarted. + // config source (apiserver). This is useful if kubelet + // has recently been restarted. podStatus = pod.Status } @@ -1503,7 +1510,7 @@ func (kl *Kubelet) determinePodResizeStatus(pod *v1.Pod, podStatus *v1.PodStatus specStatusDiffer := false for _, c := range pod.Spec.Containers { if cs, ok := podutil.GetContainerStatus(podStatus.ContainerStatuses, c.Name); ok { - if cs.Resources != nil && diff.ObjectDiff(c.Resources, *cs.Resources) != "" { + if cs.Resources != nil && !cmp.Equal(c.Resources, *cs.Resources) { specStatusDiffer = true break } @@ -2166,82 +2173,3 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupP go pcm.Destroy(val) } } - -// enableHostUserNamespace determines if the host user namespace should be used by the container runtime. 
-// Returns true if the pod is using a host pid, pic, or network namespace, the pod is using a non-namespaced -// capability, the pod contains a privileged container, or the pod has a host path volume. -// -// NOTE: when if a container shares any namespace with another container it must also share the user namespace -// or it will not have the correct capabilities in the namespace. This means that host user namespace -// is enabled per pod, not per container. -func (kl *Kubelet) enableHostUserNamespace(ctx context.Context, pod *v1.Pod) bool { - if kubecontainer.HasPrivilegedContainer(pod) || hasHostNamespace(pod) || - hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(ctx, pod) { - return true - } - return false -} - -// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container. -func hasNonNamespacedCapability(pod *v1.Pod) bool { - for _, c := range pod.Spec.Containers { - if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil { - for _, cap := range c.SecurityContext.Capabilities.Add { - if cap == "MKNOD" || cap == "SYS_TIME" || cap == "SYS_MODULE" { - return true - } - } - } - } - - return false -} - -// hasHostVolume returns true if the pod spec has a HostPath volume. -func hasHostVolume(pod *v1.Pod) bool { - for _, v := range pod.Spec.Volumes { - if v.HostPath != nil { - return true - } - } - return false -} - -// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true. -func hasHostNamespace(pod *v1.Pod) bool { - if pod.Spec.SecurityContext == nil { - return false - } - return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID -} - -// hasHostMountPVC returns true if a PVC is referencing a HostPath volume. -func (kl *Kubelet) hasHostMountPVC(ctx context.Context, pod *v1.Pod) bool { - for _, volume := range pod.Spec.Volumes { - pvcName := "" - switch { - case volume.PersistentVolumeClaim != nil: - pvcName = volume.PersistentVolumeClaim.ClaimName - case volume.Ephemeral != nil: - pvcName = ephemeral.VolumeClaimName(pod, &volume) - default: - continue - } - pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{}) - if err != nil { - klog.InfoS("Unable to retrieve pvc", "pvc", klog.KRef(pod.Namespace, pvcName), "err", err) - continue - } - if pvc != nil { - referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) - if err != nil { - klog.InfoS("Unable to retrieve pv", "pvName", pvc.Spec.VolumeName, "err", err) - continue - } - if referencedVolume != nil && referencedVolume.Spec.HostPath != nil { - return true - } - } - } - return false -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go index 0605ab4d328d..1e6359f56874 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go @@ -212,32 +212,36 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.Runtim return &kubecontainer.RuntimeStatus{Conditions: conditions} } -func fieldProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) string { +func fieldProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (string, error) { if scmp == nil { if fallbackToRuntimeDefault { - return 
v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - return "" + return "", nil } if scmp.Type == v1.SeccompProfileTypeRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - if scmp.Type == v1.SeccompProfileTypeLocalhost && scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { - fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) - return v1.SeccompLocalhostProfileNamePrefix + fname + if scmp.Type == v1.SeccompProfileTypeLocalhost { + if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { + fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) + return v1.SeccompLocalhostProfileNamePrefix + fname, nil + } else { + return "", fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.") + } } if scmp.Type == v1.SeccompProfileTypeUnconfined { - return v1.SeccompProfileNameUnconfined + return v1.SeccompProfileNameUnconfined, nil } if fallbackToRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - return "" + return "", nil } func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string]string, containerName string, - podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) string { + podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (string, error) { // container fields are applied first if containerSecContext != nil && containerSecContext.SeccompProfile != nil { return fieldProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault) @@ -249,42 +253,46 @@ func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string } if fallbackToRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - return "" + return "", nil } -func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile { +func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) { if scmp == nil { if fallbackToRuntimeDefault { return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, - } + }, nil } return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_Unconfined, - } + }, nil } if scmp.Type == v1.SeccompProfileTypeRuntimeDefault { return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, - } + }, nil } - if scmp.Type == v1.SeccompProfileTypeLocalhost && scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { - fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, - LocalhostRef: fname, + if scmp.Type == v1.SeccompProfileTypeLocalhost { + if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { + fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) + return &runtimeapi.SecurityProfile{ + ProfileType: runtimeapi.SecurityProfile_Localhost, + LocalhostRef: fname, + }, nil + } else { + return nil, fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.") } } return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_Unconfined, - } + }, nil } func (m *kubeGenericRuntimeManager) 
getSeccompProfile(annotations map[string]string, containerName string, - podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile { + podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) { // container fields are applied first if containerSecContext != nil && containerSecContext.SeccompProfile != nil { return fieldSeccompProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault) @@ -298,10 +306,10 @@ func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]str if fallbackToRuntimeDefault { return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, - } + }, nil } return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_Unconfined, - } + }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index 3fd849e4ae2e..b364fdc5cd58 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -136,7 +136,6 @@ func calcRestartCountByLogDir(path string) (int, error) { if _, err := os.Stat(path); err != nil { return 0, nil } - restartCount := int(0) files, err := os.ReadDir(path) if err != nil { return 0, err @@ -144,6 +143,7 @@ func calcRestartCountByLogDir(path string) (int, error) { if len(files) == 0 { return 0, nil } + restartCount := 0 restartCountLogFileRegex := regexp.MustCompile(`^(\d+)\.log(\..*)?`) for _, file := range files { if file.IsDir() { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go index a91e190ee750..35a19704b951 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -293,7 +293,7 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods sandboxIDs.Insert(container.PodSandboxId) } - sandboxesByPod := make(sandboxesByPodUID) + sandboxesByPod := make(sandboxesByPodUID, len(sandboxes)) for _, sandbox := range sandboxes { podUID := types.UID(sandbox.Metadata.Uid) sandboxInfo := sandboxGCInfo{ @@ -301,13 +301,8 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods createTime: time.Unix(0, sandbox.CreatedAt), } - // Set ready sandboxes to be active. - if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY { - sandboxInfo.active = true - } - - // Set sandboxes that still have containers to be active. - if sandboxIDs.Has(sandbox.Id) { + // Set ready sandboxes and sandboxes that still have containers to be active. 
+ if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY || sandboxIDs.Has(sandbox.Id) { sandboxInfo.active = true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 9c9cc56d5ae9..16c08f8d2ec9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -26,6 +26,7 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" + "github.com/google/go-cmp/cmp" "go.opentelemetry.io/otel/trace" crierror "k8s.io/cri-api/pkg/errors" "k8s.io/klog/v2" @@ -34,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubetypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/diff" utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilversion "k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -553,7 +553,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe if !exists || apiContainerStatus.State.Running == nil || apiContainerStatus.Resources == nil || kubeContainerStatus.State != kubecontainer.ContainerStateRunning || kubeContainerStatus.ID.String() != apiContainerStatus.ContainerID || - len(diff.ObjectDiff(container.Resources.Requests, apiContainerStatus.AllocatedResources)) != 0 { + !cmp.Equal(container.Resources.Requests, apiContainerStatus.AllocatedResources) { return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go index 5e6f05b4e187..7db21ed74f0e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go @@ -34,12 +34,12 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po ReadonlyPaths: securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount), } } + var err error - // TODO: Deprecated, remove after we switch to Seccomp field - // set SeccompProfilePath. - synthesized.SeccompProfilePath = m.getSeccompProfilePath(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) - - synthesized.Seccomp = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) + synthesized.Seccomp, err = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) + if err != nil { + return nil, err + } // set ApparmorProfile. synthesized.ApparmorProfile = apparmor.GetProfileNameFromPodAnnotations(pod.Annotations, container.Name) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index e0395d3292b0..f0dad71fea1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -613,7 +613,7 @@ var ( &metrics.CounterOpts{ Subsystem: KubeletSubsystem, Name: StartedHostProcessContainersTotalKey, - Help: "Cumulative number of hostprocess containers started. 
This metric will only be collected on Windows and requires WindowsHostProcessContainers feature gate to be enabled.", + Help: "Cumulative number of hostprocess containers started. This metric will only be collected on Windows.", StabilityLevel: metrics.ALPHA, }, []string{"container_type"}, @@ -623,7 +623,7 @@ var ( &metrics.CounterOpts{ Subsystem: KubeletSubsystem, Name: StartedHostProcessContainersErrorsTotalKey, - Help: "Cumulative number of errors when starting hostprocess containers. This metric will only be collected on Windows and requires WindowsHostProcessContainers feature gate to be enabled.", + Help: "Cumulative number of errors when starting hostprocess containers. This metric will only be collected on Windows.", StabilityLevel: metrics.ALPHA, }, []string{"container_type", "code"}, @@ -776,19 +776,16 @@ func Register(collectors ...metrics.StableCollector) { legacyregistry.MustRegister(OrphanedRuntimePodTotal) legacyregistry.MustRegister(RestartedPodTotal) legacyregistry.MustRegister(ManagedEphemeralContainers) - if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) { - legacyregistry.MustRegister(PodResourcesEndpointRequestsTotalCount) - - if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGetAllocatable) { - legacyregistry.MustRegister(PodResourcesEndpointRequestsListCount) - legacyregistry.MustRegister(PodResourcesEndpointRequestsGetAllocatableCount) - legacyregistry.MustRegister(PodResourcesEndpointErrorsListCount) - legacyregistry.MustRegister(PodResourcesEndpointErrorsGetAllocatableCount) - } - if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGet) { - legacyregistry.MustRegister(PodResourcesEndpointRequestsGetCount) - legacyregistry.MustRegister(PodResourcesEndpointErrorsGetCount) - } + legacyregistry.MustRegister(PodResourcesEndpointRequestsTotalCount) + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGetAllocatable) { + legacyregistry.MustRegister(PodResourcesEndpointRequestsListCount) + legacyregistry.MustRegister(PodResourcesEndpointRequestsGetAllocatableCount) + legacyregistry.MustRegister(PodResourcesEndpointErrorsListCount) + legacyregistry.MustRegister(PodResourcesEndpointErrorsGetAllocatableCount) + } + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGet) { + legacyregistry.MustRegister(PodResourcesEndpointRequestsGetCount) + legacyregistry.MustRegister(PodResourcesEndpointErrorsGetCount) } legacyregistry.MustRegister(StartedPodsTotal) legacyregistry.MustRegister(StartedPodsErrorsTotal) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go index 0bb5ab1bb48a..1ffe9d20f8ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go @@ -27,11 +27,9 @@ import ( v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilvalidation "k8s.io/apimachinery/pkg/util/validation" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -103,10 +101,7 @@ func omitDuplicates(strs []string) []string { func (c *Configurer) formDNSSearchFitsLimits(composedSearch []string, pod *v1.Pod) []string { 
limitsExceeded := false - maxDNSSearchPaths, maxDNSSearchListChars := validation.MaxDNSSearchPathsLegacy, validation.MaxDNSSearchListCharsLegacy - if utilfeature.DefaultFeatureGate.Enabled(features.ExpandedDNSConfig) { - maxDNSSearchPaths, maxDNSSearchListChars = validation.MaxDNSSearchPathsExpanded, validation.MaxDNSSearchListCharsExpanded - } + maxDNSSearchPaths, maxDNSSearchListChars := validation.MaxDNSSearchPaths, validation.MaxDNSSearchListChars if len(composedSearch) > maxDNSSearchPaths { composedSearch = composedSearch[:maxDNSSearchPaths] @@ -195,10 +190,7 @@ func (c *Configurer) CheckLimitsForResolvConf() { return } - domainCountLimit, maxDNSSearchListChars := validation.MaxDNSSearchPathsLegacy, validation.MaxDNSSearchListCharsLegacy - if utilfeature.DefaultFeatureGate.Enabled(features.ExpandedDNSConfig) { - domainCountLimit, maxDNSSearchListChars = validation.MaxDNSSearchPathsExpanded, validation.MaxDNSSearchListCharsExpanded - } + domainCountLimit, maxDNSSearchListChars := validation.MaxDNSSearchPaths, validation.MaxDNSSearchListChars if c.ClusterDomain != "" { domainCountLimit -= 3 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/evented.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/evented.go index 1dd176489a5a..5c74db03c2b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/evented.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/evented.go @@ -46,16 +46,16 @@ var ( // e.g. Streaming data issues from the runtime or the runtime does not implement the // container events stream. func isEventedPLEGInUse() bool { - eventedPLEGUsageMu.Lock() - defer eventedPLEGUsageMu.Unlock() + eventedPLEGUsageMu.RLock() + defer eventedPLEGUsageMu.RUnlock() return eventedPLEGUsage } // setEventedPLEGUsage should only be accessed from // Start/Stop of Evented PLEG. func setEventedPLEGUsage(enable bool) { - eventedPLEGUsageMu.RLock() - defer eventedPLEGUsageMu.RUnlock() + eventedPLEGUsageMu.Lock() + defer eventedPLEGUsageMu.Unlock() eventedPLEGUsage = enable } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go index 69457e6c983b..e3cc4f760804 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go @@ -43,8 +43,6 @@ import ( // pod. When a static pod gets deleted, the associated orphaned mirror pod // will also be removed. type Manager interface { - // GetPods returns the regular pods bound to the kubelet and their spec. - GetPods() []*v1.Pod // GetPodByFullName returns the (non-mirror) pod that matches full name, as well as // whether the pod was found. GetPodByFullName(podFullName string) (*v1.Pod, bool) @@ -60,8 +58,18 @@ type Manager interface { // GetMirrorPodByPod returns the mirror pod for the given static pod and // whether it was known to the pod manager. GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool) - // GetPodsAndMirrorPods returns the both regular and mirror pods. - GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod) + // GetPodAndMirrorPod returns the complement for a pod - if a pod was provided + // and a mirror pod can be found, return it. If a mirror pod is provided and + // the pod can be found, return it and true for wasMirror. + GetPodAndMirrorPod(*v1.Pod) (pod, mirrorPod *v1.Pod, wasMirror bool) + + // GetPods returns the regular pods bound to the kubelet and their spec. 
+ GetPods() []*v1.Pod + + // GetPodsAndMirrorPods returns the set of pods, the set of mirror pods, and + // the pod fullnames of any orphaned mirror pods. + GetPodsAndMirrorPods() (allPods []*v1.Pod, allMirrorPods []*v1.Pod, orphanedMirrorPodFullnames []string) + // SetPods replaces the internal pods with the new pods. // It is currently only used for testing. SetPods(pods []*v1.Pod) @@ -69,12 +77,11 @@ type Manager interface { AddPod(pod *v1.Pod) // UpdatePod updates the given pod in the manager. UpdatePod(pod *v1.Pod) - // DeletePod deletes the given pod from the manager. For mirror pods, + // RemovePod deletes the given pod from the manager. For mirror pods, // this means deleting the mappings related to mirror pods. For non- // mirror pods, this means deleting from indexes for all non-mirror pods. - DeletePod(pod *v1.Pod) - // GetOrphanedMirrorPodNames returns names of orphaned mirror pods - GetOrphanedMirrorPodNames() []string + RemovePod(pod *v1.Pod) + // TranslatePodUID returns the actual UID of a pod. If the UID belongs to // a mirror pod, returns the UID of its static pod. Otherwise, returns the // original UID. @@ -86,17 +93,12 @@ type Manager interface { // GetUIDTranslations returns the mappings of static pod UIDs to mirror pod // UIDs and mirror pod UIDs to static pod UIDs. GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID) - // IsMirrorPodOf returns true if mirrorPod is a correct representation of - // pod; false otherwise. - IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool - - MirrorClient } // basicManager is a functional Manager. // // All fields in basicManager are read-only and are updated calling SetPods, -// AddPod, UpdatePod, or DeletePod. +// AddPod, UpdatePod, or RemovePod. type basicManager struct { // Protects all internal maps. lock sync.RWMutex @@ -112,15 +114,11 @@ type basicManager struct { // Mirror pod UID to pod UID map. translationByUID map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID - - // A mirror pod client to create/delete mirror pods. - MirrorClient } // NewBasicPodManager returns a functional Manager. 
-func NewBasicPodManager(client MirrorClient) Manager { +func NewBasicPodManager() Manager { pm := &basicManager{} - pm.MirrorClient = client pm.SetPods(nil) return pm } @@ -191,7 +189,7 @@ func (pm *basicManager) updatePodsInternal(pods ...*v1.Pod) { } } -func (pm *basicManager) DeletePod(pod *v1.Pod) { +func (pm *basicManager) RemovePod(pod *v1.Pod) { updateMetrics(pod, nil) pm.lock.Lock() defer pm.lock.Unlock() @@ -214,12 +212,18 @@ func (pm *basicManager) GetPods() []*v1.Pod { return podsMapToPods(pm.podByUID) } -func (pm *basicManager) GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod) { +func (pm *basicManager) GetPodsAndMirrorPods() (allPods []*v1.Pod, allMirrorPods []*v1.Pod, orphanedMirrorPodFullnames []string) { pm.lock.RLock() defer pm.lock.RUnlock() - pods := podsMapToPods(pm.podByUID) - mirrorPods := mirrorPodsMapToMirrorPods(pm.mirrorPodByUID) - return pods, mirrorPods + allPods = podsMapToPods(pm.podByUID) + allMirrorPods = mirrorPodsMapToMirrorPods(pm.mirrorPodByUID) + + for podFullName := range pm.mirrorPodByFullName { + if _, ok := pm.podByFullName[podFullName]; !ok { + orphanedMirrorPodFullnames = append(orphanedMirrorPodFullnames, podFullName) + } + } + return allPods, allMirrorPods, orphanedMirrorPodFullnames } func (pm *basicManager) GetPodByUID(uid types.UID) (*v1.Pod, bool) { @@ -280,19 +284,8 @@ func (pm *basicManager) GetUIDTranslations() (podToMirror map[kubetypes.Resolved return podToMirror, mirrorToPod } -func (pm *basicManager) GetOrphanedMirrorPodNames() []string { - pm.lock.RLock() - defer pm.lock.RUnlock() - var podFullNames []string - for podFullName := range pm.mirrorPodByFullName { - if _, ok := pm.podByFullName[podFullName]; !ok { - podFullNames = append(podFullNames, podFullName) - } - } - return podFullNames -} - -func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool { +// IsMirrorPodOf returns true if pod and mirrorPod are associated with each other. +func IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool { // Check name and namespace first. if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace { return false @@ -333,3 +326,15 @@ func (pm *basicManager) GetPodByMirrorPod(mirrorPod *v1.Pod) (*v1.Pod, bool) { pod, ok := pm.podByFullName[kubecontainer.GetPodFullName(mirrorPod)] return pod, ok } + +func (pm *basicManager) GetPodAndMirrorPod(aPod *v1.Pod) (pod, mirrorPod *v1.Pod, wasMirror bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + + fullName := kubecontainer.GetPodFullName(aPod) + if kubetypes.IsMirrorPod(aPod) { + return pm.podByFullName[fullName], aPod, true + } + return aPod, pm.mirrorPodByFullName[fullName], false + +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go index 8d72e1ad55ef..82e7cb93c2cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go @@ -775,16 +775,23 @@ func (p *podWorkers) UpdatePod(options UpdatePodOptions) { } // if this pod is being synced for the first time, we need to make sure it is an active pod if options.Pod != nil && (options.Pod.Status.Phase == v1.PodFailed || options.Pod.Status.Phase == v1.PodSucceeded) { - // check to see if the pod is not running and the pod is terminal. - // If this succeeds then record in the podWorker that it is terminated. 
+ // Check to see if the pod is not running and the pod is terminal; if this succeeds then record in the podWorker that it is terminated.
+ // This is needed because after a kubelet restart, we need to ensure terminal pods will NOT be considered active in Pod Admission. See http://issues.k8s.io/105523
+ // However, `filterOutInactivePods` considers pods that are actively terminating as active. As a result, `IsPodKnownTerminated()` needs to return true and thus `terminatedAt` needs to be set.
 if statusCache, err := p.podCache.Get(uid); err == nil {
 if isPodStatusCacheTerminal(statusCache) {
+ // At this point we know:
+ // (1) The pod is terminal based on the config source.
+ // (2) The pod is terminal based on the runtime cache.
+ // This implies that this pod had already completed `SyncTerminatingPod` sometime in the past. The pod is likely being synced for the first time due to a kubelet restart.
+ // These pods need to complete SyncTerminatedPod to ensure that all resources are cleaned and that the status manager makes the final status updates for the pod.
+ // As a result, set finished: false, to ensure a Terminated event will be sent and `SyncTerminatedPod` will run.
 status = &podSyncStatus{
 terminatedAt: now,
 terminatingAt: now,
 syncedAt: now,
 startedTerminating: true,
- finished: true,
+ finished: false,
 fullname: kubecontainer.BuildPodFullName(name, ns),
 }
 }
@@ -1086,6 +1093,10 @@ func (p *podWorkers) cleanupUnstartedPod(pod *v1.Pod, status *podSyncStatus) {
 // or can be started, and updates the cached pod state so that downstream components can observe what the
 // pod worker goroutine is currently attempting to do. If ok is false, there is no available event. If any
 // of the boolean values is false, ensure the appropriate cleanup happens before returning.
+//
+// This method should ensure that either status.pendingUpdate is cleared and merged into status.activeUpdate,
+// or when a pod cannot be started status.pendingUpdate remains the same. Pods that have not been started
+// should never have an activeUpdate because that is exposed to downstream components on started pods.
 func (p *podWorkers) startPodSync(podUID types.UID) (ctx context.Context, update podWork, canStart, canEverStart, ok bool) {
 p.podLock.Lock()
 defer p.podLock.Unlock()
@@ -1159,6 +1170,8 @@ func (p *podWorkers) startPodSync(podUID types.UID) (ctx context.Context, update
 klog.V(4).InfoS("Pod cannot start ever", "pod", klog.KObj(update.Options.Pod), "podUID", podUID, "updateType", update.WorkType)
 return ctx, update, canStart, canEverStart, true
 case !canStart:
+ // this is the only path where we don't start the pod, so we need to put the change back in pendingUpdate
+ status.pendingUpdate = &update.Options
 status.working = false
 klog.V(4).InfoS("Pod cannot start yet", "pod", klog.KObj(update.Options.Pod), "podUID", podUID)
 return ctx, update, canStart, canEverStart, true
@@ -1168,6 +1181,12 @@ func (p *podWorkers) startPodSync(podUID types.UID) (ctx context.Context, update
 status.startedAt = p.clock.Now()
 status.mergeLastUpdate(update.Options)
 
+ // If we are admitting the pod and it is new, record the count of containers
+ // TODO: We should probably move this into syncPod and add an execution count
+ // to the syncPod arguments, and this should be recorded on the first sync.
+ // Leaving it here complicates a particularly important loop. 
+ metrics.ContainersPerPodCount.Observe(float64(len(update.Options.Pod.Spec.Containers))) + return ctx, update, true, true, true } @@ -1545,9 +1564,17 @@ func (p *podWorkers) SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorke State: status.WorkType(), Orphan: orphan, } - if status.activeUpdate != nil && status.activeUpdate.Pod != nil { - sync.HasConfig = true - sync.Static = kubetypes.IsStaticPod(status.activeUpdate.Pod) + switch { + case status.activeUpdate != nil: + if status.activeUpdate.Pod != nil { + sync.HasConfig = true + sync.Static = kubetypes.IsStaticPod(status.activeUpdate.Pod) + } + case status.pendingUpdate != nil: + if status.pendingUpdate.Pod != nil { + sync.HasConfig = true + sync.Static = kubetypes.IsStaticPod(status.pendingUpdate.Pod) + } } workers[uid] = sync } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go index 5f0fb5e03c00..e4d0cbd931b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go @@ -21,10 +21,13 @@ import ( "math" v1 "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -103,6 +106,14 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, status.Phase = v1.PodFailed status.Reason = events.PreemptContainer status.Message = message + if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { + podutil.UpdatePodCondition(status, &v1.PodCondition{ + Type: v1.DisruptionTarget, + Status: v1.ConditionTrue, + Reason: v1.PodReasonTerminationByKubelet, + Message: "Pod was preempted by Kubelet to accommodate a critical pod.", + }) + } }) if err != nil { klog.ErrorS(err, "Failed to evict pod", "pod", klog.KObj(pod)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go index 8b63d368d075..b11442ae902c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go @@ -129,7 +129,7 @@ func (kl *Kubelet) runPod(ctx context.Context, pod *v1.Pod, retryDelay time.Dura klog.InfoS("Pod's containers not running: syncing", "pod", klog.KObj(pod)) klog.InfoS("Creating a mirror pod for static pod", "pod", klog.KObj(pod)) - if err := kl.podManager.CreateMirrorPod(pod); err != nil { + if err := kl.mirrorPodClient.CreateMirrorPod(pod); err != nil { klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod)) } mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go index 0c033c9d69b5..8e21d3fa50ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -69,6 +69,9 @@ import ( runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" podresourcesapi 
"k8s.io/kubelet/pkg/apis/podresources/v1" podresourcesapiv1alpha1 "k8s.io/kubelet/pkg/apis/podresources/v1alpha1" + "k8s.io/kubelet/pkg/cri/streaming" + "k8s.io/kubelet/pkg/cri/streaming/portforward" + remotecommandserver "k8s.io/kubelet/pkg/cri/streaming/remotecommand" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/v1/validation" @@ -77,9 +80,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/podresources" podresourcesgrpc "k8s.io/kubernetes/pkg/kubelet/apis/podresources/grpc" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" - remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand" "k8s.io/kubernetes/pkg/kubelet/prober" servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics" "k8s.io/kubernetes/pkg/kubelet/server/stats" @@ -218,18 +218,19 @@ type PodResourcesProviders struct { } // ListenAndServePodResources initializes a gRPC server to serve the PodResources service -func ListenAndServePodResources(socket string, providers podresources.PodResourcesProviders) { +func ListenAndServePodResources(endpoint string, providers podresources.PodResourcesProviders) { server := grpc.NewServer(podresourcesgrpc.WithRateLimiter(podresourcesgrpc.DefaultQPS, podresourcesgrpc.DefaultBurstTokens)) podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(providers)) podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(providers)) - l, err := util.CreateListener(socket) + l, err := util.CreateListener(endpoint) if err != nil { klog.ErrorS(err, "Failed to create listener for podResources endpoint") os.Exit(1) } + klog.InfoS("Starting to serve the podresources API", "endpoint", endpoint) if err := server.Serve(l); err != nil { klog.ErrorS(err, "Failed to serve") os.Exit(1) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/provider.go index 1241111236a6..09f82c609f51 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/provider.go @@ -27,18 +27,24 @@ import ( statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/server/stats" "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" "k8s.io/kubernetes/pkg/kubelet/status" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) +// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet. +// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc. +type PodManager interface { + TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID +} + // NewCRIStatsProvider returns a Provider that provides the node stats // from cAdvisor and the container stats from CRI. 
func NewCRIStatsProvider( cadvisor cadvisor.Interface, resourceAnalyzer stats.ResourceAnalyzer, - podManager kubepod.Manager, + podManager PodManager, runtimeCache kubecontainer.RuntimeCache, runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, @@ -54,7 +60,7 @@ func NewCRIStatsProvider( func NewCadvisorStatsProvider( cadvisor cadvisor.Interface, resourceAnalyzer stats.ResourceAnalyzer, - podManager kubepod.Manager, + podManager PodManager, runtimeCache kubecontainer.RuntimeCache, imageService kubecontainer.ImageService, statusProvider status.PodStatusProvider, @@ -67,7 +73,7 @@ func NewCadvisorStatsProvider( // cAdvisor and the container stats using the containerStatsProvider. func newStatsProvider( cadvisor cadvisor.Interface, - podManager kubepod.Manager, + podManager PodManager, runtimeCache kubecontainer.RuntimeCache, containerStatsProvider containerStatsProvider, ) *Provider { @@ -82,7 +88,7 @@ func newStatsProvider( // Provider provides the stats of the node and the pod-managed containers. type Provider struct { cadvisor cadvisor.Interface - podManager kubepod.Manager + podManager PodManager runtimeCache kubecontainer.RuntimeCache containerStatsProvider } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go index 728b438a465f..70e2dc038127 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/google/go-cmp/cmp" clientset "k8s.io/client-go/kubernetes" v1 "k8s.io/api/core/v1" @@ -32,7 +33,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" @@ -40,7 +40,6 @@ import ( "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" - kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/status/state" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" statusutil "k8s.io/kubernetes/pkg/util/pod" @@ -70,7 +69,7 @@ type versionedPodStatus struct { // All methods are thread-safe. type manager struct { kubeClient clientset.Interface - podManager kubepod.Manager + podManager PodManager // Map from pod UID to sync status of the corresponding pod. podStatuses map[types.UID]versionedPodStatus podStatusesLock sync.RWMutex @@ -87,8 +86,18 @@ type manager struct { stateFileDirectory string } -// PodStatusProvider knows how to provide status for a pod. It's intended to be used by other components -// that need to introspect status. +// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet. +// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc. +type PodManager interface { + GetPodByUID(types.UID) (*v1.Pod, bool) + GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool) + TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID + GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID) +} + +// PodStatusProvider knows how to provide status for a pod. 
It is intended to be used by other components +// that need to introspect the authoritative status of a pod. The PodStatusProvider represents the actual +// status of a running pod as the kubelet sees it. type PodStatusProvider interface { // GetPodStatus returns the cached status for the provided pod UID, as well as whether it // was a cache hit. @@ -149,7 +158,7 @@ type Manager interface { const syncPeriod = 10 * time.Second // NewManager returns a functional Manager. -func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider, podStartupLatencyHelper PodStartupLatencyStateHelper, stateFileDirectory string) Manager { +func NewManager(kubeClient clientset.Interface, podManager PodManager, podDeletionSafety PodDeletionSafetyProvider, podStartupLatencyHelper PodStartupLatencyStateHelper, stateFileDirectory string) Manager { return &manager{ kubeClient: kubeClient, podManager: podManager, @@ -937,7 +946,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { } klog.V(3).InfoS("Pod status is inconsistent with cached status for pod, a reconciliation should be triggered", "pod", klog.KObj(pod), - "statusDiff", diff.ObjectDiff(podStatus, &status)) + "statusDiff", cmp.Diff(podStatus, &status)) return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/allowlist.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/allowlist.go index 16bc95fff08e..07daa528eac0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/allowlist.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/allowlist.go @@ -116,13 +116,8 @@ func (w *patternAllowlist) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle. } } - var hostNet, hostIPC bool - if pod.Spec.SecurityContext != nil { - hostNet = pod.Spec.HostNetwork - hostIPC = pod.Spec.HostIPC - } for _, s := range pod.Spec.SecurityContext.Sysctls { - if err := w.validateSysctl(s.Name, hostNet, hostIPC); err != nil { + if err := w.validateSysctl(s.Name, pod.Spec.HostNetwork, pod.Spec.HostIPC); err != nil { return lifecycle.PodAdmitResult{ Admit: false, Reason: ForbiddenReason, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/cache_based_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/cache_based_manager.go index 22531a28780b..160f9ca0d295 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/cache_based_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/cache_based_manager.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apiserver/pkg/storage" "k8s.io/kubernetes/pkg/kubelet/util" @@ -92,7 +92,7 @@ func isObjectOlder(newObject, oldObject runtime.Object) bool { return newVersion < oldVersion } -func (s *objectStore) AddReference(namespace, name string) { +func (s *objectStore) AddReference(namespace, name string, _ types.UID) { key := objectKey{namespace: namespace, name: name} // AddReference is called from RegisterPod, thus it needs to be efficient. 
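The status-manager hunk earlier in this section swaps the deprecated `k8s.io/apimachinery/pkg/util/diff` helpers for `github.com/google/go-cmp/cmp`: `cmp.Equal` replaces the `len(diff.ObjectDiff(...)) != 0` idiom and `cmp.Diff` produces the human-readable log output. A minimal sketch of the two calls, using a stand-in struct rather than the real PodStatus types:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// status is an illustrative stand-in for the compared API objects.
type status struct {
	Phase   string
	Message string
}

func main() {
	a := status{Phase: "Running"}
	b := status{Phase: "Failed", Message: "OOMKilled"}

	// cmp.Equal replaces checks of the form len(diff.ObjectDiff(a, b)) != 0.
	fmt.Println(cmp.Equal(a, b)) // false

	// cmp.Diff replaces diff.ObjectDiff for logging: it returns a
	// textual description of the mismatched fields.
	fmt.Println(cmp.Diff(a, b))
}
```

One caveat worth knowing: go-cmp panics on structs with unexported fields unless options like `cmpopts.IgnoreUnexported` are supplied; the API status types compared in these hunks are exported-field structs, so the plain calls suffice.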
@@ -114,7 +114,7 @@ func (s *objectStore) AddReference(namespace, name string) {
 item.data = nil
}

-func (s *objectStore) DeleteReference(namespace, name string) {
+func (s *objectStore) DeleteReference(namespace, name string, _ types.UID) {
 key := objectKey{namespace: namespace, name: name}

 s.lock.Lock()
@@ -225,7 +225,7 @@ func (c *cacheBasedManager) RegisterPod(pod *v1.Pod) {
 c.lock.Lock()
 defer c.lock.Unlock()
 for name := range names {
- c.objectStore.AddReference(pod.Namespace, name)
+ c.objectStore.AddReference(pod.Namespace, name, pod.UID)
 }
 var prev *v1.Pod
 key := objectKey{namespace: pod.Namespace, name: pod.Name, uid: pod.UID}
@@ -238,7 +238,7 @@ func (c *cacheBasedManager) RegisterPod(pod *v1.Pod) {
 // names and prev need to have their ref counts decremented. Any that
 // are only in prev need to be completely removed. This unconditional
 // call takes care of both cases.
- c.objectStore.DeleteReference(prev.Namespace, name)
+ c.objectStore.DeleteReference(prev.Namespace, name, prev.UID)
 }
 }
}
@@ -252,7 +252,7 @@ func (c *cacheBasedManager) UnregisterPod(pod *v1.Pod) {
 delete(c.registeredPods, key)
 if prev != nil {
 for name := range c.getReferencedObjects(prev) {
- c.objectStore.DeleteReference(prev.Namespace, name)
+ c.objectStore.DeleteReference(prev.Namespace, name, prev.UID)
 }
 }
}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/manager.go
index 2c983d35d22b..99f10a1dfa1e 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/manager.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/manager.go
@@ -17,8 +17,9 @@ limitations under the License.
 package manager

 import (
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
)

 // Manager is the interface for registering and unregistering
@@ -46,15 +47,15 @@ type Manager interface {
 // Store is the interface for an object cache that
 // can be used by cacheBasedManager.
 type Store interface {
- // AddReference adds a reference to the object to the store.
+ // AddReference adds a reference from referencedFrom to the object to the store.
 // Note that multiple additions to the store have to be allowed
 // in the implementations and effectively treated as refcounted.
- AddReference(namespace, name string)
+ AddReference(namespace, name string, referencedFrom types.UID)
- // DeleteReference deletes reference to the object from the store.
+ // DeleteReference deletes a reference from referencedFrom to the object from the store.
 // Note that object should be deleted only when there was a
 // corresponding Delete call for each of Add calls (effectively
- // when refcount was reduced to zero).
- DeleteReference(namespace, name string)
+ // when refcount of every referencedFrom was reduced to zero).
+ DeleteReference(namespace, name string, referencedFrom types.UID)
 // Get an object from a store.
 Get(namespace, name string) (runtime.Object, error)
}
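With the `Store` interface now threading the referencing pod's UID through `AddReference`/`DeleteReference`, the watch-based implementation that follows can keep a per-referrer count instead of a single integer, so duplicate unregistrations from one pod can no longer release an object another pod still uses. A self-contained sketch of that bookkeeping, standard library only, with illustrative names rather than the kubelet's:

```go
package main

import "fmt"

type uid string

// refMap counts references per referring pod, mirroring the
// objectCacheItem.refMap change: the cached object is released only
// once no pod holds any reference at all.
type refMap map[uid]int

func (r refMap) add(from uid) { r[from]++ }

// del returns true when the last reference is gone and the caller
// can stop the underlying machinery (a reflector, in the real code).
func (r refMap) del(from uid) bool {
	r[from]--
	if r[from] <= 0 {
		delete(r, from)
	}
	return len(r) == 0
}

func main() {
	refs := refMap{}
	refs.add("pod-a")
	refs.add("pod-b")
	fmt.Println(refs.del("pod-a")) // false: pod-b still references the object
	fmt.Println(refs.del("pod-b")) // true: safe to stop the reflector
}
```

The real cache additionally stops the underlying reflector and drops the item from the map when the count empties, as the hunk below shows.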
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go
index e3a1d7e29d81..f04891fce539 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go
@@ -21,7 +21,7 @@ import (
 "sync"
 "time"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
 "k8s.io/client-go/tools/cache"

 "k8s.io/klog/v2"
@@ -31,6 +31,7 @@ import (
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apimachinery/pkg/watch"
@@ -44,7 +45,7 @@ type isImmutableFunc func(runtime.Object) bool
 // objectCacheItem is a single item stored in objectCache.
 type objectCacheItem struct {
- refCount int
+ refMap map[types.UID]int
 store *cacheStore
 reflector *cache.Reflector

@@ -231,7 +232,7 @@ func (c *objectCache) newReflectorLocked(namespace, name string) *objectCacheIte
 0,
 )
 item := &objectCacheItem{
- refCount: 0,
+ refMap: make(map[types.UID]int),
 store: store,
 reflector: reflector,
 hasSynced: func() (bool, error) { return store.hasSynced(), nil },
@@ -245,7 +246,7 @@ func (c *objectCache) newReflectorLocked(namespace, name string) *objectCacheIte
 return item
}

-func (c *objectCache) AddReference(namespace, name string) {
+func (c *objectCache) AddReference(namespace, name string, referencedFrom types.UID) {
 key := objectKey{namespace: namespace, name: name}

 // AddReference is called from RegisterPod thus it needs to be efficient.
@@ -260,17 +261,20 @@ func (c *objectCache) AddReference(namespace, name string) {
 item = c.newReflectorLocked(namespace, name)
 c.items[key] = item
 }
- item.refCount++
+ item.refMap[referencedFrom]++
}

-func (c *objectCache) DeleteReference(namespace, name string) {
+func (c *objectCache) DeleteReference(namespace, name string, referencedFrom types.UID) {
 key := objectKey{namespace: namespace, name: name}

 c.lock.Lock()
 defer c.lock.Unlock()
 if item, ok := c.items[key]; ok {
- item.refCount--
- if item.refCount == 0 {
+ item.refMap[referencedFrom]--
+ if item.refMap[referencedFrom] == 0 {
+ delete(item.refMap, referencedFrom)
+ }
+ if len(item.refMap) == 0 {
 // Stop the underlying reflector.
 item.stop()
 delete(c.items, key)
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go
index af51d45c6055..b8fc558d2e2f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go
@@ -24,6 +24,8 @@ import (
 "fmt"
 "net"
 "net/url"
+ "os"
+ "path/filepath"
 "strings"
 "syscall"
 "time"
@@ -117,9 +119,27 @@ func parseEndpoint(endpoint string) (string, string, error) {
 }
}

-// LocalEndpoint empty implementation
+// LocalEndpoint returns the full path to a named pipe at the given endpoint - unlike on unix, we can't use sockets.
 func LocalEndpoint(path, file string) (string, error) {
- return "", fmt.Errorf("LocalEndpoints are unsupported in this build")
+ // extract the podresources config name from the path. 
We only need this on Windows because of the preferred layout of pipes;
+ // this is why we have the extra logic in here instead of changing the function signature. Join the file to make sure the
+ // last path component is a file, so the operation chain works.
+ podResourcesDir := filepath.Base(filepath.Dir(filepath.Join(path, file)))
+ if podResourcesDir == "" {
+ // should not happen because the user can configure a root directory, and we expect a subdirectory inside
+ // the user supplied root directory named something like "pod-resources".
+ return "", fmt.Errorf("cannot infer the podresources directory from path %q", path)
+ }
+ // windows pipes are expected to use forward slashes: https://learn.microsoft.com/windows/win32/ipc/pipe-names
+ // so using `url` like we do on unix gives us unclear benefits - see https://github.com/kubernetes/kubernetes/issues/78628
+ // So we just construct the path from scratch.
+ // Format: \\ServerName\pipe\PipeName
+ // where ServerName is either the name of a remote computer or a period, to specify the local computer.
+ // We only consider PipeName as a regular windows path, while the pipe path components are fixed, hence we use constants.
+ serverPart := `\\.`
+ pipePart := "pipe"
+ pipeName := "kubelet-" + podResourcesDir
+ return npipeProtocol + "://" + filepath.Join(serverPart, pipePart, pipeName), nil
}

var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64")
@@ -146,6 +166,11 @@ func IsUnixDomainSocket(filePath string) (bool, error) {
 // does NOT work in 1809 if the socket file is created within a bind mounted directory by a container
 // and the FSCTL is issued in the host by the kubelet.

+ // If the file does not exist, it cannot be a Unix domain socket.
+ if _, err := os.Stat(filePath); os.IsNotExist(err) {
+ return false, fmt.Errorf("File %s not found. Err: %v", filePath, err)
+ }
+
 klog.V(6).InfoS("Function IsUnixDomainSocket starts", "filePath", filePath)
 // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 we cannot rely
 // on the Unix Domain socket working on the very first try, hence the potential need to
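The `LocalEndpoint` implementation above derives the pipe name from the last component of the configured endpoint directory and prefixes it with the fixed local-pipe namespace. A rough sketch of the resulting string, with "pod-resources" assumed as the directory name and the npipe scheme written out literally:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// The last component of the configured endpoint directory becomes
	// part of the pipe name; "pod-resources" is an assumed example value.
	podResourcesDir := path.Base("/var/lib/kubelet/pod-resources")

	// Local named pipes always live under \\.\pipe\ (the "." server part
	// means the local computer), so only the final name varies.
	pipeName := `\\.\pipe\kubelet-` + podResourcesDir
	fmt.Println("npipe://" + pipeName) // npipe://\\.\pipe\kubelet-pod-resources
}
```

On Windows the real code joins the parts with `filepath.Join`, which uses backslash separators there; the plain concatenation above just makes the final shape visible on any platform.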
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
index 80ef98aafb27..8aab267d2c11 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go
@@ -40,7 +40,6 @@ import (
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/kubelet/config"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
- "k8s.io/kubernetes/pkg/kubelet/pod"
 "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
 "k8s.io/kubernetes/pkg/volume"
 "k8s.io/kubernetes/pkg/volume/csimigration"
@@ -70,12 +69,19 @@ type DesiredStateOfWorldPopulator interface {
 HasAddedPods() bool
}

-// podStateProvider can determine if a pod is going to be terminated.
-type podStateProvider interface {
+// PodStateProvider can determine if a pod is going to be terminated.
+type PodStateProvider interface {
 ShouldPodContainersBeTerminating(types.UID) bool
 ShouldPodRuntimeBeRemoved(types.UID) bool
}

+// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet.
+// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc.
+type PodManager interface {
+ GetPodByUID(types.UID) (*v1.Pod, bool)
+ GetPods() []*v1.Pod
+}
+
 // NewDesiredStateOfWorldPopulator returns a new instance of
 // DesiredStateOfWorldPopulator.
 //
@@ -90,8 +96,8 @@ type podStateProvider interface {
 func NewDesiredStateOfWorldPopulator(
 kubeClient clientset.Interface,
 loopSleepDuration time.Duration,
- podManager pod.Manager,
- podStateProvider podStateProvider,
+ podManager PodManager,
+ podStateProvider PodStateProvider,
 desiredStateOfWorld cache.DesiredStateOfWorld,
 actualStateOfWorld cache.ActualStateOfWorld,
 kubeContainerRuntime kubecontainer.Runtime,
@@ -121,8 +127,8 @@ func NewDesiredStateOfWorldPopulator(
 type desiredStateOfWorldPopulator struct {
 kubeClient clientset.Interface
 loopSleepDuration time.Duration
- podManager pod.Manager
- podStateProvider podStateProvider
+ podManager PodManager
+ podStateProvider PodStateProvider
 desiredStateOfWorld cache.DesiredStateOfWorld
 actualStateOfWorld cache.ActualStateOfWorld
 pods processedPods
@@ -207,7 +213,9 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() {
 // Iterate through all pods in desired state of world, and remove if they no
 // longer exist
 func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
+ podsFromCache := make(map[volumetypes.UniquePodName]struct{})
 for _, volumeToMount := range dswp.desiredStateOfWorld.GetVolumesToMount() {
+ podsFromCache[volumetypes.UniquePodName(volumeToMount.Pod.UID)] = struct{}{}
 pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID)

 if podExists {
@@ -256,6 +264,23 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 dswp.deleteProcessedPod(volumeToMount.PodName)
 }

+ // Clean up orphaned entries from processedPods
+ dswp.pods.Lock()
+ orphanedPods := make([]volumetypes.UniquePodName, 0, len(dswp.pods.processedPods))
+ for k := range dswp.pods.processedPods {
+ if _, ok := podsFromCache[k]; !ok {
+ orphanedPods = append(orphanedPods, k)
+ }
+ }
+ dswp.pods.Unlock()
+ for _, orphanedPod := range orphanedPods {
+ uid := types.UID(orphanedPod)
+ _, podExists := dswp.podManager.GetPodByUID(uid)
+ if !podExists && dswp.podStateProvider.ShouldPodRuntimeBeRemoved(uid) {
+ dswp.deleteProcessedPod(orphanedPod)
+ }
+ }
+
 podsWithError := dswp.desiredStateOfWorld.GetPodsWithErrors()
 for _, podName := range podsWithError {
 if _, podExists := dswp.podManager.GetPodByUID(types.UID(podName)); !podExists {
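The populator change above prunes `processedPods` entries whose pods no longer appear in the desired state of the world: orphan candidates are collected while holding the lock and deleted in a second pass so the critical section stays short. A generic sketch of that set-difference pattern, with plain maps standing in for the kubelet's types:

```go
package main

import "fmt"

func main() {
	processed := map[string]struct{}{"pod-a": {}, "pod-b": {}, "pod-c": {}}
	// Keys observed while walking the desired state of the world.
	seen := map[string]struct{}{"pod-a": {}}

	// Collect orphans first (the real code does this under dswp.pods.Lock()),
	// then delete in a second pass; the patch also re-checks the pod manager
	// and pod state provider before actually removing an entry.
	var orphaned []string
	for k := range processed {
		if _, ok := seen[k]; !ok {
			orphaned = append(orphaned, k)
		}
	}
	for _, k := range orphaned {
		delete(processed, k)
	}
	fmt.Println(len(processed)) // 1
}
```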
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go
index 220c917878e4..ca44dda18393 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go
@@ -93,7 +93,9 @@ func (gvi *globalVolumeInfo) addPodVolume(rcv *reconstructedVolume) {
 func (rc *reconciler) cleanupMounts(volume podVolume) {
 klog.V(2).InfoS("Reconciler sync states: could not find volume information in desired state, clean up the mount points", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
 mountedVolume := operationexecutor.MountedVolume{
- PodName: volume.podName,
+ PodName: volume.podName,
+ // VolumeName should be generated by `GetUniqueVolumeNameFromSpec` or `GetUniqueVolumeNameFromSpecWithPod`.
+ // However, since we don't have the volume information in asw when cleaning up mounts, it doesn't matter what we put here.
 VolumeName: v1.UniqueVolumeName(volume.volumeSpecName),
 InnerVolumeSpecName: volume.volumeSpecName,
 PluginName: volume.pluginName,
@@ -256,8 +258,6 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (rvolume *reconstructe
 var volumeMapper volumepkg.BlockVolumeMapper
 var volumeMounter volumepkg.Mounter
 var deviceMounter volumepkg.DeviceMounter
- // Path to the mount or block device to check
- var checkPath string

 if volume.volumeMode == v1.PersistentVolumeBlock {
 var newMapperErr error
@@ -274,8 +274,6 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (rvolume *reconstructe
 pod.UID,
 newMapperErr)
 }
- mapDir, linkName := volumeMapper.GetPodDeviceMapPath()
- checkPath = filepath.Join(mapDir, linkName)
 } else {
 var err error
 volumeMounter, err = plugin.NewMounter(
@@ -291,7 +289,6 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (rvolume *reconstructe
 pod.UID,
 err)
 }
- checkPath = volumeMounter.GetPath()
 if deviceMountablePlugin != nil {
 deviceMounter, err = deviceMountablePlugin.NewDeviceMounter()
 if err != nil {
@@ -305,16 +302,6 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (rvolume *reconstructe
 }
 }

- // Check existence of mount point for filesystem volume or symbolic link for block volume
- isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, checkPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
- if checkErr != nil {
- return nil, checkErr
- }
- // If mount or symlink doesn't exist, volume reconstruction should be failed
- if !isExist {
- return nil, fmt.Errorf("volume: %q is not mounted", uniqueVolumeName)
- }
-
 reconstructedVolume := &reconstructedVolume{
 volumeName: uniqueVolumeName,
 podName: volume.podName,
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
index 7de1d03fbbe6..b34f21b62f6f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
@@ -17,6 +17,7 @@ limitations under the License.
 package volumemanager

 import (
+ "context"
 "errors"
 "fmt"
 "sort"
@@ -38,7 +39,6 @@ import (
 csitrans "k8s.io/csi-translation-lib"
 "k8s.io/kubernetes/pkg/kubelet/config"
 "k8s.io/kubernetes/pkg/kubelet/container"
- "k8s.io/kubernetes/pkg/kubelet/pod"
 "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
 "k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics"
 "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
@@ -97,14 +97,14 @@ type VolumeManager interface {
 // actual state of the world).
 // An error is returned if all volumes are not attached and mounted within
 // the duration defined in podAttachAndMountTimeout.
- WaitForAttachAndMount(pod *v1.Pod) error
+ WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) error

 // WaitForUnmount processes the volumes referenced in the specified
 // pod and blocks until they are all unmounted (reflected in the actual
 // state of the world).
 // An error is returned if all volumes are not unmounted within
 // the duration defined in podAttachAndMountTimeout. 
- WaitForUnmount(pod *v1.Pod) error + WaitForUnmount(ctx context.Context, pod *v1.Pod) error // GetMountedVolumesForPod returns a VolumeMap containing the volumes // referenced by the specified pod that are successfully attached and @@ -150,11 +150,18 @@ type VolumeManager interface { } // podStateProvider can determine if a pod is going to be terminated -type podStateProvider interface { +type PodStateProvider interface { ShouldPodContainersBeTerminating(k8stypes.UID) bool ShouldPodRuntimeBeRemoved(k8stypes.UID) bool } +// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet. +// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc. +type PodManager interface { + GetPodByUID(k8stypes.UID) (*v1.Pod, bool) + GetPods() []*v1.Pod +} + // NewVolumeManager returns a new concrete instance implementing the // VolumeManager interface. // @@ -166,8 +173,8 @@ type podStateProvider interface { func NewVolumeManager( controllerAttachDetachEnabled bool, nodeName k8stypes.NodeName, - podManager pod.Manager, - podStateProvider podStateProvider, + podManager PodManager, + podStateProvider PodStateProvider, kubeClient clientset.Interface, volumePluginMgr *volume.VolumePluginMgr, kubeContainerRuntime container.Runtime, @@ -385,7 +392,7 @@ func (vm *volumeManager) MarkVolumesAsReportedInUse( vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse) } -func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { +func (vm *volumeManager) WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) error { if pod == nil { return nil } @@ -404,9 +411,11 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { // like Downward API, depend on this to update the contents of the volume). vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName) - err := wait.PollImmediate( + err := wait.PollUntilContextTimeout( + ctx, podAttachAndMountRetryInterval, podAttachAndMountTimeout, + true, vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes)) if err != nil { @@ -423,7 +432,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { } return fmt.Errorf( - "unmounted volumes=%v, unattached volumes=%v, failed to process volumes=%v: %s", + "unmounted volumes=%v, unattached volumes=%v, failed to process volumes=%v: %w", unmountedVolumes, unattachedVolumes, volumesNotInDSW, @@ -434,7 +443,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { return nil } -func (vm *volumeManager) WaitForUnmount(pod *v1.Pod) error { +func (vm *volumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error { if pod == nil { return nil } @@ -444,9 +453,11 @@ func (vm *volumeManager) WaitForUnmount(pod *v1.Pod) error { vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName) - err := wait.PollImmediate( + err := wait.PollUntilContextTimeout( + ctx, podAttachAndMountRetryInterval, podAttachAndMountTimeout, + true, vm.verifyVolumesUnmountedFunc(uniquePodName)) if err != nil { @@ -461,7 +472,7 @@ func (vm *volumeManager) WaitForUnmount(pod *v1.Pod) error { } return fmt.Errorf( - "mounted volumes=%v: %s", + "mounted volumes=%v: %w", mountedVolumes, err) } @@ -493,14 +504,15 @@ func (vm *volumeManager) getUnattachedVolumes(uniquePodName types.UniquePodName) unattachedVolumes = append(unattachedVolumes, volumeToMount.OuterVolumeSpecName) } } + sort.Strings(unattachedVolumes) return unattachedVolumes } // verifyVolumesMountedFunc returns a method that returns true when all expected // volumes are mounted. 
-func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc { - return func() (done bool, err error) { +func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionWithContextFunc { + return func(_ context.Context) (done bool, err error) { if errs := vm.desiredStateOfWorld.PopPodErrors(podName); len(errs) > 0 { return true, errors.New(strings.Join(errs, "; ")) } @@ -510,8 +522,8 @@ func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, e // verifyVolumesUnmountedFunc returns a method that is true when there are no mounted volumes for this // pod. -func (vm *volumeManager) verifyVolumesUnmountedFunc(podName types.UniquePodName) wait.ConditionFunc { - return func() (done bool, err error) { +func (vm *volumeManager) verifyVolumesUnmountedFunc(podName types.UniquePodName) wait.ConditionWithContextFunc { + return func(_ context.Context) (done bool, err error) { if errs := vm.desiredStateOfWorld.PopPodErrors(podName); len(errs) > 0 { return true, errors.New(strings.Join(errs, "; ")) } @@ -540,6 +552,8 @@ func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string unmountedVolumes = append(unmountedVolumes, expectedVolume) } } + sort.Strings(unmountedVolumes) + return unmountedVolumes } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_fake.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_fake.go index e3e56b4d853c..94a7ac5b2d54 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_fake.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager_fake.go @@ -17,6 +17,8 @@ limitations under the License. package volumemanager import ( + "context" + v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/container" @@ -46,12 +48,12 @@ func (f *FakeVolumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan } // WaitForAttachAndMount is not implemented -func (f *FakeVolumeManager) WaitForAttachAndMount(pod *v1.Pod) error { +func (f *FakeVolumeManager) WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) error { return nil } // WaitForUnmount is not implemented -func (f *FakeVolumeManager) WaitForUnmount(pod *v1.Pod) error { +func (f *FakeVolumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/.import-restrictions index 5bcdf7301be8..6390404bd0e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/.import-restrictions @@ -2,5 +2,4 @@ rules: # override pkg/ import restriction on cmd/ for kubemark - selectorRegexp: k8s[.]io/kubernetes/cmd allowedPrefixes: - - k8s.io/kubernetes/cmd/kube-proxy/app - k8s.io/kubernetes/cmd/kubelet/app diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go deleted file mode 100644 index e7ba215e57f0..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubemark - -import ( - "fmt" - "time" - - v1 "k8s.io/api/core/v1" - discoveryv1 "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/types" - clientset "k8s.io/client-go/kubernetes" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/events" - utilsysctl "k8s.io/component-helpers/node/util/sysctl" - proxyapp "k8s.io/kubernetes/cmd/kube-proxy/app" - "k8s.io/kubernetes/pkg/proxy" - proxyconfig "k8s.io/kubernetes/pkg/proxy/config" - "k8s.io/kubernetes/pkg/proxy/iptables" - proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables" - utiliptables "k8s.io/kubernetes/pkg/util/iptables" - utilnode "k8s.io/kubernetes/pkg/util/node" - utilexec "k8s.io/utils/exec" - netutils "k8s.io/utils/net" - utilpointer "k8s.io/utils/pointer" - - "k8s.io/klog/v2" -) - -type HollowProxy struct { - ProxyServer *proxyapp.ProxyServer -} - -type FakeProxier struct { - proxyconfig.NoopNodeHandler -} - -func (*FakeProxier) Sync() {} -func (*FakeProxier) SyncLoop() { - select {} -} -func (*FakeProxier) OnServiceAdd(service *v1.Service) {} -func (*FakeProxier) OnServiceUpdate(oldService, service *v1.Service) {} -func (*FakeProxier) OnServiceDelete(service *v1.Service) {} -func (*FakeProxier) OnServiceSynced() {} -func (*FakeProxier) OnEndpointSliceAdd(slice *discoveryv1.EndpointSlice) {} -func (*FakeProxier) OnEndpointSliceUpdate(oldSlice, slice *discoveryv1.EndpointSlice) {} -func (*FakeProxier) OnEndpointSliceDelete(slice *discoveryv1.EndpointSlice) {} -func (*FakeProxier) OnEndpointSlicesSynced() {} - -func NewHollowProxyOrDie( - nodeName string, - client clientset.Interface, - eventClient v1core.EventsGetter, - iptInterface utiliptables.Interface, - sysctl utilsysctl.Interface, - execer utilexec.Interface, - broadcaster events.EventBroadcaster, - recorder events.EventRecorder, - useRealProxier bool, - proxierSyncPeriod time.Duration, - proxierMinSyncPeriod time.Duration, -) (*HollowProxy, error) { - // Create proxier and service/endpoint handlers. - var proxier proxy.Provider - var err error - - if useRealProxier { - nodeIP := utilnode.GetNodeIP(client, nodeName) - if nodeIP == nil { - klog.InfoS("Can't determine this node's IP, assuming 127.0.0.1") - nodeIP = netutils.ParseIPSloppy("127.0.0.1") - } - family := v1.IPv4Protocol - if iptInterface.IsIPv6() { - family = v1.IPv6Protocol - } - // Real proxier with fake iptables, sysctl, etc underneath it. - //var err error - proxier, err = iptables.NewProxier( - family, - iptInterface, - sysctl, - execer, - proxierSyncPeriod, - proxierMinSyncPeriod, - false, - false, - 0, - proxyutiliptables.NewNoOpLocalDetector(), - nodeName, - nodeIP, - recorder, - nil, - []string{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - } else { - proxier = &FakeProxier{} - } - - // Create a Hollow Proxy instance. 
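The deleted `hollow_proxy.go` (pruned here together with its `kube-proxy/app` import allowance in `.import-restrictions`) relied on a `FakeProxier`: a struct whose empty methods satisfy the whole `proxy.Provider` interface, with `SyncLoop` parked on an empty `select`. A stripped-down sketch of that no-op pattern, with a hypothetical three-method interface standing in for the real one:

```go
package main

import "fmt"

// Provider is a hypothetical stand-in for a large event-driven interface
// like proxy.Provider, trimmed to three methods for illustration.
type Provider interface {
	Sync()
	SyncLoop()
	OnServiceAdd(name string)
}

// NoopProvider satisfies Provider without doing any work. SyncLoop blocks
// forever on an empty select, like kubemark's FakeProxier, so the component
// "runs" without consuming CPU.
type NoopProvider struct{}

func (NoopProvider) Sync()                    {}
func (NoopProvider) SyncLoop()                { select {} }
func (NoopProvider) OnServiceAdd(name string) {}

func main() {
	var p Provider = NoopProvider{}
	p.OnServiceAdd("kubernetes") // accepted, silently ignored
	fmt.Println("fake provider wired up; SyncLoop would block forever")
}
```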
- nodeRef := &v1.ObjectReference{ - Kind: "Node", - Name: nodeName, - UID: types.UID(nodeName), - Namespace: "", - } - return &HollowProxy{ - ProxyServer: &proxyapp.ProxyServer{ - Client: client, - EventClient: eventClient, - IptInterface: iptInterface, - Proxier: proxier, - Broadcaster: broadcaster, - Recorder: recorder, - ProxyMode: "fake", - NodeRef: nodeRef, - OOMScoreAdj: utilpointer.Int32Ptr(0), - ConfigSyncPeriod: 30 * time.Second, - }, - }, nil -} - -func (hp *HollowProxy) Run() error { - if err := hp.ProxyServer.Run(); err != nil { - return fmt.Errorf("Error while running proxy: %w", err) - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go index 4285c0a4ccbe..fb7f818b2492 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go @@ -113,7 +113,7 @@ func formatURL(scheme string, host string, port int, path string) *url.URL { func v1HeaderToHTTPHeader(headerList []v1.HTTPHeader) http.Header { headers := make(http.Header) for _, header := range headerList { - headers[header.Name] = append(headers[header.Name], header.Value) + headers.Add(header.Name, header.Value) } return headers } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS index 675d11afb440..c2ea7233a532 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS @@ -6,3 +6,4 @@ reviewers: - sig-network-reviewers labels: - sig/network + - area/kube-proxy diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS deleted file mode 100644 index 08861f7d8521..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - thockin -reviewers: - - sig-network-reviewers -labels: - - sig/network diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go deleted file mode 100644 index 64cad5ced197..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
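The `probe/http` one-liner is subtler than it looks: `http.Header` is a map keyed by canonical header names, and `Header.Add` canonicalises the key before storing, while a direct map write does not. Probe headers stored under a non-canonical key were therefore invisible to the canonicalising `Header.Get`. A self-contained demonstration:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	raw := make(http.Header)
	// Direct map assignment stores the key verbatim...
	raw["x-probe-header"] = append(raw["x-probe-header"], "1")
	// ...so the canonicalising Get (which looks up "X-Probe-Header") misses it.
	fmt.Printf("raw map write: Get=%q\n", raw.Get("x-probe-header")) // ""

	canon := make(http.Header)
	// Add canonicalises the key to "X-Probe-Header" first.
	canon.Add("x-probe-header", "1")
	fmt.Printf("headers.Add:   Get=%q\n", canon.Get("x-probe-header")) // "1"
}
```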
-*/ - -// +k8s:deepcopy-gen=package -// +groupName=kubeproxy.config.k8s.io - -package config // import "k8s.io/kubernetes/pkg/proxy/apis/config" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go deleted file mode 100644 index fda569221cbd..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubeproxy.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, - ) - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go deleted file mode 100644 index 75e8082b25e1..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/scheme/scheme.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheme - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/proxy/apis/config" - "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1" -) - -var ( - // Scheme defines methods for serializing and deserializing API objects. - Scheme = runtime.NewScheme() - // Codecs provides methods for retrieving codecs and serializers for specific - // versions and content types. 
- Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict) -) - -func init() { - AddToScheme(Scheme) -} - -// AddToScheme adds the types of this group into the given scheme. -func AddToScheme(scheme *runtime.Scheme) { - utilruntime.Must(v1alpha1.AddToScheme(scheme)) - utilruntime.Must(config.AddToScheme(scheme)) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go deleted file mode 100644 index eec55133ee94..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go +++ /dev/null @@ -1,281 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "sort" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - componentbaseconfig "k8s.io/component-base/config" -) - -// KubeProxyIPTablesConfiguration contains iptables-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPTablesConfiguration struct { - // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - MasqueradeBit *int32 - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool - // LocalhostNodePorts tells kube-proxy to allow service NodePorts to be accessed via - // localhost (iptables mode only) - LocalhostNodePorts *bool - // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration - // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration -} - -// KubeProxyIPVSConfiguration contains ipvs-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPVSConfiguration struct { - // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration - // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration - // ipvs scheduler - Scheduler string - // excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch - // when cleaning up ipvs services. - ExcludeCIDRs []string - // strict ARP configure arp_ignore and arp_announce to avoid answering ARP queries - // from kube-ipvs0 interface - StrictARP bool - // tcpTimeout is the timeout value used for idle IPVS TCP sessions. - // The default value is 0, which preserves the current timeout value on the system. - TCPTimeout metav1.Duration - // tcpFinTimeout is the timeout value used for IPVS TCP sessions after receiving a FIN. - // The default value is 0, which preserves the current timeout value on the system. - TCPFinTimeout metav1.Duration - // udpTimeout is the timeout value used for IPVS UDP packets. 
- // The default value is 0, which preserves the current timeout value on the system. - UDPTimeout metav1.Duration -} - -// KubeProxyConntrackConfiguration contains conntrack settings for -// the Kubernetes proxy server. -type KubeProxyConntrackConfiguration struct { - // maxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave the limit as-is and ignore min). - MaxPerCore *int32 - // min is the minimum value of connect-tracking records to allocate, - // regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is). - Min *int32 - // tcpEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '2s'). Must be greater than 0 to set. - TCPEstablishedTimeout *metav1.Duration - // tcpCloseWaitTimeout is how long an idle conntrack entry - // in CLOSE_WAIT state will remain in the conntrack - // table. (e.g. '60s'). Must be greater than 0 to set. - TCPCloseWaitTimeout *metav1.Duration -} - -// KubeProxyWinkernelConfiguration contains Windows/HNS settings for -// the Kubernetes proxy server. -type KubeProxyWinkernelConfiguration struct { - // networkName is the name of the network kube-proxy will use - // to create endpoints and policies - NetworkName string - // sourceVip is the IP address of the source VIP endpoint used for - // NAT when loadbalancing - SourceVip string - // enableDSR tells kube-proxy whether HNS policies should be created - // with DSR - EnableDSR bool - // RootHnsEndpointName is the name of hnsendpoint that is attached to - // l2bridge for root network namespace - RootHnsEndpointName string - // ForwardHealthCheckVip forwards service VIP for health check port on - // Windows - ForwardHealthCheckVip bool -} - -// DetectLocalConfiguration contains optional settings related to DetectLocalMode option -type DetectLocalConfiguration struct { - // BridgeInterface is a string argument which represents a single bridge interface name. - // Kube-proxy considers traffic as local if originating from this given bridge. - // This argument should be set if DetectLocalMode is set to BridgeInterface. - BridgeInterface string - // InterfaceNamePrefix is a string argument which represents a single interface prefix name. - // Kube-proxy considers traffic as local if originating from one or more interfaces which match - // the given prefix. This argument should be set if DetectLocalMode is set to InterfaceNamePrefix. - InterfaceNamePrefix string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeProxyConfiguration contains everything necessary to configure the -// Kubernetes proxy server. -type KubeProxyConfiguration struct { - metav1.TypeMeta - - // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. - FeatureGates map[string]bool - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string - // healthzBindAddress is the IP address and port for the health check server to serve on, - // defaulting to 0.0.0.0:10256 - HealthzBindAddress string - // metricsBindAddress is the IP address and port for the metrics server to serve on, - // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) - MetricsBindAddress string - // BindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit - BindAddressHardFail bool - // enableProfiling enables profiling via web interface on /debug/pprof handler. 
- // Profiling handlers will be handled by metrics server. - EnableProfiling bool - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string - // clientConnection specifies the kubeconfig file and client connection settings for the proxy - // server to use when communicating with the apiserver. - ClientConnection componentbaseconfig.ClientConnectionConfiguration - // iptables contains iptables-related configuration options. - IPTables KubeProxyIPTablesConfiguration - // ipvs contains ipvs-related configuration options. - IPVS KubeProxyIPVSConfiguration - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 - // mode specifies which proxy mode to use. - Mode ProxyMode - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string - // conntrack contains conntrack-related configuration options. - Conntrack KubeProxyConntrackConfiguration - // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater - // than 0. - ConfigSyncPeriod metav1.Duration - // nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid - // IP blocks. These values are as a parameter to select the interfaces where nodeport works. - // In case someone would like to expose a service on localhost for local visit and some other interfaces for - // particular purpose, a list of IP blocks would do that. - // If set it to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort. - // If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. - // An empty string slice is meant to select all network interfaces. - NodePortAddresses []string - // winkernel contains winkernel-related configuration options. - Winkernel KubeProxyWinkernelConfiguration - // ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics. - ShowHiddenMetricsForVersion string - // DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR - DetectLocalMode LocalMode - // DetectLocal contains optional configuration settings related to DetectLocalMode. - DetectLocal DetectLocalConfiguration -} - -// ProxyMode represents modes used by the Kubernetes proxy server. -// -// Currently, two modes of proxy are available on Linux platforms: 'iptables' and 'ipvs'. -// One mode of proxy is available on Windows platforms: 'kernelspace'. -// -// If the proxy mode is unspecified, the best-available proxy mode will be used (currently this -// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be -// used (due to lack of kernel support, missing userspace components, etc) then kube-proxy -// will exit with an error. 
-type ProxyMode string - -const ( - ProxyModeIPTables ProxyMode = "iptables" - ProxyModeIPVS ProxyMode = "ipvs" - ProxyModeKernelspace ProxyMode = "kernelspace" -) - -// LocalMode represents modes to detect local traffic from the node -type LocalMode string - -// Currently supported modes for LocalMode -const ( - LocalModeClusterCIDR LocalMode = "ClusterCIDR" - LocalModeNodeCIDR LocalMode = "NodeCIDR" - LocalModeBridgeInterface LocalMode = "BridgeInterface" - LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix" -) - -func (m *ProxyMode) Set(s string) error { - *m = ProxyMode(s) - return nil -} - -func (m *ProxyMode) String() string { - if m != nil { - return string(*m) - } - return "" -} - -func (m *ProxyMode) Type() string { - return "ProxyMode" -} - -func (m *LocalMode) Set(s string) error { - *m = LocalMode(s) - return nil -} - -func (m *LocalMode) String() string { - if m != nil { - return string(*m) - } - return "" -} - -func (m *LocalMode) Type() string { - return "LocalMode" -} - -type ConfigurationMap map[string]string - -func (m *ConfigurationMap) String() string { - pairs := []string{} - for k, v := range *m { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -func (m *ConfigurationMap) Set(value string) error { - for _, s := range strings.Split(value, ",") { - if len(s) == 0 { - continue - } - arr := strings.SplitN(s, "=", 2) - if len(arr) == 2 { - (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) - } else { - (*m)[strings.TrimSpace(arr[0])] = "" - } - } - return nil -} - -func (*ConfigurationMap) Type() string { - return "mapStringString" -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/defaults.go deleted file mode 100644 index 127913249ad4..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/defaults.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
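`ProxyMode`, `LocalMode`, and `ConfigurationMap` each carry `Set`/`String`/`Type` methods so they can be bound directly as command-line flags: `Set` and `String` satisfy the standard library's `flag.Value`, while `Type` is the extra method spf13/pflag (which kube-proxy actually uses) asks for. A sketch of the idiom using the standard `flag` package, with a hypothetical `Mode` type:

```go
package main

import (
	"flag"
	"fmt"
)

// Mode mirrors the ProxyMode pattern: a string-backed type bound straight to
// a flag because *Mode implements flag.Value (Set + String).
type Mode string

func (m *Mode) Set(s string) error { *m = Mode(s); return nil }
func (m *Mode) String() string {
	if m != nil {
		return string(*m)
	}
	return ""
}
func (m *Mode) Type() string { return "Mode" } // pflag-only; harmless extra for stdlib flag

func main() {
	var mode Mode
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&mode, "proxy-mode", "which proxy mode to use")
	_ = fs.Parse([]string{"--proxy-mode=ipvs"})
	fmt.Println("mode:", mode) // mode: ipvs
}
```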
-*/ - -package v1alpha1 - -import ( - "fmt" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kruntime "k8s.io/apimachinery/pkg/runtime" - kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" - - "k8s.io/kubernetes/pkg/cluster/ports" - "k8s.io/kubernetes/pkg/kubelet/qos" - proxyutil "k8s.io/kubernetes/pkg/proxy/util" - netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" -) - -func addDefaultingFuncs(scheme *kruntime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyConfiguration) { - - if len(obj.BindAddress) == 0 { - obj.BindAddress = "0.0.0.0" - } - - defaultHealthzAddress, defaultMetricsAddress := getDefaultAddresses(obj.BindAddress) - - if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = fmt.Sprintf("%s:%v", defaultHealthzAddress, ports.ProxyHealthzPort) - } else { - obj.HealthzBindAddress = proxyutil.AppendPortIfNeeded(obj.HealthzBindAddress, ports.ProxyHealthzPort) - } - if obj.MetricsBindAddress == "" { - obj.MetricsBindAddress = fmt.Sprintf("%s:%v", defaultMetricsAddress, ports.ProxyStatusPort) - } else { - obj.MetricsBindAddress = proxyutil.AppendPortIfNeeded(obj.MetricsBindAddress, ports.ProxyStatusPort) - } - - if obj.OOMScoreAdj == nil { - temp := int32(qos.KubeProxyOOMScoreAdj) - obj.OOMScoreAdj = &temp - } - if obj.IPTables.SyncPeriod.Duration == 0 { - obj.IPTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} - } - if obj.IPTables.MinSyncPeriod.Duration == 0 { - obj.IPTables.MinSyncPeriod = metav1.Duration{Duration: 1 * time.Second} - } - if obj.IPTables.LocalhostNodePorts == nil { - obj.IPTables.LocalhostNodePorts = pointer.Bool(true) - } - if obj.IPVS.SyncPeriod.Duration == 0 { - obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} - } - - if obj.Conntrack.MaxPerCore == nil { - obj.Conntrack.MaxPerCore = pointer.Int32(32 * 1024) - } - if obj.Conntrack.Min == nil { - obj.Conntrack.Min = pointer.Int32(128 * 1024) - } - - if obj.IPTables.MasqueradeBit == nil { - temp := int32(14) - obj.IPTables.MasqueradeBit = &temp - } - if obj.Conntrack.TCPEstablishedTimeout == nil { - obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) - } - if obj.Conntrack.TCPCloseWaitTimeout == nil { - // See https://github.com/kubernetes/kubernetes/issues/32551. - // - // CLOSE_WAIT conntrack state occurs when the Linux kernel - // sees a FIN from the remote server. Note: this is a half-close - // condition that persists as long as the local side keeps the - // socket open. The condition is rare as it is typical in most - // protocols for both sides to issue a close; this typically - // occurs when the local socket is lazily garbage collected. - // - // If the CLOSE_WAIT conntrack entry expires, then FINs from the - // local socket will not be properly SNAT'd and will not reach the - // remote server (if the connection was subject to SNAT). If the - // remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT - // timeout, then there will be an inconsistency in the state of - // the connection and a new connection reusing the SNAT (src, - // port) pair may be rejected by the remote side with RST. This - // can cause new calls to connect(2) to return with ECONNREFUSED. - // - // We set CLOSE_WAIT to one hour by default to better match - // typical server timeouts. 
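Note how the deleted defaulter only fills pointer fields that are still nil (`MasqueradeBit`, `Conntrack.MaxPerCore`, `IPTables.LocalhostNodePorts`, ...): the pointer indirection is what lets an explicit `0` or `false` survive defaulting, since a plain `int32` cannot distinguish "unset" from "zero". A minimal sketch of the idiom with an invented `Conntrack` struct:

```go
package main

import "fmt"

type Conntrack struct {
	// MaxPerCore is *int32 rather than int32 so "unset" (nil) and an
	// explicit 0 ("leave the limit as-is") are distinguishable.
	MaxPerCore *int32
}

func setDefaults(c *Conntrack) {
	if c.MaxPerCore == nil { // default only when the user said nothing
		v := int32(32 * 1024)
		c.MaxPerCore = &v
	}
}

func main() {
	var unset Conntrack
	zero := Conntrack{MaxPerCore: new(int32)} // user explicitly asked for 0

	setDefaults(&unset)
	setDefaults(&zero)
	fmt.Println(*unset.MaxPerCore, *zero.MaxPerCore) // 32768 0
}
```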
- obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: 1 * time.Hour} - } - if obj.ConfigSyncPeriod.Duration == 0 { - obj.ConfigSyncPeriod.Duration = 15 * time.Minute - } - - if len(obj.ClientConnection.ContentType) == 0 { - obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" - } - if obj.ClientConnection.QPS == 0.0 { - obj.ClientConnection.QPS = 5.0 - } - if obj.ClientConnection.Burst == 0 { - obj.ClientConnection.Burst = 10 - } - if obj.FeatureGates == nil { - obj.FeatureGates = make(map[string]bool) - } -} - -// getDefaultAddresses returns default address of healthz and metrics server -// based on the given bind address. IPv6 addresses are enclosed in square -// brackets for appending port. -func getDefaultAddresses(bindAddress string) (defaultHealthzAddress, defaultMetricsAddress string) { - if netutils.ParseIPSloppy(bindAddress).To4() != nil { - return "0.0.0.0", "127.0.0.1" - } - return "[::]", "[::1]" -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go deleted file mode 100644 index a67f856a8d04..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/proxy/apis/config -// +k8s:conversion-gen-external-types=k8s.io/kube-proxy/config/v1alpha1 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=k8s.io/kube-proxy/config/v1alpha1 -// +groupName=kubeproxy.config.k8s.io - -package v1alpha1 // import "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go deleted file mode 100644 index a053b1e77839..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
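`getDefaultAddresses` returns `[::]`/`[::1]` rather than bare IPv6 literals because a port is appended next, and an unbracketed IPv6 address would make the final colon ambiguous. The standard library's `net.JoinHostPort` applies the same bracketing automatically:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv6 literals must be bracketed before a port is appended; otherwise
	// the colons are ambiguous. JoinHostPort handles both address families.
	fmt.Println(net.JoinHostPort("0.0.0.0", "10256")) // 0.0.0.0:10256
	fmt.Println(net.JoinHostPort("::1", "10249"))     // [::1]:10249
}
```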
-*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" -) - -// GroupName is the group name used in this package -const GroupName = "kubeproxy.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -var ( - // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, - // defaulting and conversion init funcs are registered as well. - localSchemeBuilder = &kubeproxyconfigv1alpha1.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index ab8c42ba754f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,325 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - unsafe "unsafe" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - configv1alpha1 "k8s.io/component-base/config/v1alpha1" - v1alpha1 "k8s.io/kube-proxy/config/v1alpha1" - config "k8s.io/kubernetes/pkg/proxy/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1alpha1.DetectLocalConfiguration)(nil), (*config.DetectLocalConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(a.(*v1alpha1.DetectLocalConfiguration), b.(*config.DetectLocalConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.DetectLocalConfiguration)(nil), (*v1alpha1.DetectLocalConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(a.(*config.DetectLocalConfiguration), b.(*v1alpha1.DetectLocalConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyConfiguration)(nil), (*config.KubeProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(a.(*v1alpha1.KubeProxyConfiguration), b.(*config.KubeProxyConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.KubeProxyConfiguration)(nil), (*v1alpha1.KubeProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(a.(*config.KubeProxyConfiguration), b.(*v1alpha1.KubeProxyConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyConntrackConfiguration)(nil), (*config.KubeProxyConntrackConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(a.(*v1alpha1.KubeProxyConntrackConfiguration), b.(*config.KubeProxyConntrackConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.KubeProxyConntrackConfiguration)(nil), (*v1alpha1.KubeProxyConntrackConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(a.(*config.KubeProxyConntrackConfiguration), b.(*v1alpha1.KubeProxyConntrackConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyIPTablesConfiguration)(nil), (*config.KubeProxyIPTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(a.(*v1alpha1.KubeProxyIPTablesConfiguration), b.(*config.KubeProxyIPTablesConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.KubeProxyIPTablesConfiguration)(nil), (*v1alpha1.KubeProxyIPTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(a.(*config.KubeProxyIPTablesConfiguration), b.(*v1alpha1.KubeProxyIPTablesConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyIPVSConfiguration)(nil), (*config.KubeProxyIPVSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(a.(*v1alpha1.KubeProxyIPVSConfiguration), b.(*config.KubeProxyIPVSConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.KubeProxyIPVSConfiguration)(nil), (*v1alpha1.KubeProxyIPVSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(a.(*config.KubeProxyIPVSConfiguration), b.(*v1alpha1.KubeProxyIPVSConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.KubeProxyWinkernelConfiguration)(nil), (*config.KubeProxyWinkernelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(a.(*v1alpha1.KubeProxyWinkernelConfiguration), b.(*config.KubeProxyWinkernelConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.KubeProxyWinkernelConfiguration)(nil), (*v1alpha1.KubeProxyWinkernelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(a.(*config.KubeProxyWinkernelConfiguration), b.(*v1alpha1.KubeProxyWinkernelConfiguration), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(in *v1alpha1.DetectLocalConfiguration, out *config.DetectLocalConfiguration, s conversion.Scope) error { - out.BridgeInterface = in.BridgeInterface - out.InterfaceNamePrefix = in.InterfaceNamePrefix - return nil -} - -// Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(in *v1alpha1.DetectLocalConfiguration, out *config.DetectLocalConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(in, out, s) -} - -func autoConvert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(in *config.DetectLocalConfiguration, out *v1alpha1.DetectLocalConfiguration, s conversion.Scope) error { - out.BridgeInterface = in.BridgeInterface - out.InterfaceNamePrefix = in.InterfaceNamePrefix - return nil -} - -// Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration is an autogenerated conversion function. 
-func Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(in *config.DetectLocalConfiguration, out *v1alpha1.DetectLocalConfiguration, s conversion.Scope) error { - return autoConvert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, s conversion.Scope) error { - out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - out.BindAddress = in.BindAddress - out.HealthzBindAddress = in.HealthzBindAddress - out.MetricsBindAddress = in.MetricsBindAddress - out.BindAddressHardFail = in.BindAddressHardFail - out.EnableProfiling = in.EnableProfiling - out.ClusterCIDR = in.ClusterCIDR - out.HostnameOverride = in.HostnameOverride - if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { - return err - } - if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { - return err - } - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = config.ProxyMode(in.Mode) - out.PortRange = in.PortRange - if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { - return err - } - out.ConfigSyncPeriod = in.ConfigSyncPeriod - out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses)) - if err := Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(&in.Winkernel, &out.Winkernel, s); err != nil { - return err - } - out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion - out.DetectLocalMode = config.LocalMode(in.DetectLocalMode) - if err := Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(&in.DetectLocal, &out.DetectLocal, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration is an autogenerated conversion function. 
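The deleted conversion functions lean on a generator trick worth noting: fields whose versioned and internal types are memory-identical (e.g. `FeatureGates`, `NodePortAddresses`, `ExcludeCIDRs`) are moved with an `unsafe.Pointer` cast instead of an element-by-element copy. This is sound only because conversion-gen has verified that the layouts match, which is why the cast appears only in generated code. A toy sketch with invented slice types:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two structurally identical types, as if one lived in the v1alpha1 package
// and one in the internal config package (hypothetical stand-ins).
type v1alpha1CIDRs []string
type internalCIDRs []string

// convert reinterprets the slice header in place -- no allocation, no copy.
// Only safe because the element types have identical memory layout; that is
// why hand-written code should not do this, but a generator that has
// checked the layouts can.
func convert(in v1alpha1CIDRs) internalCIDRs {
	return *(*internalCIDRs)(unsafe.Pointer(&in))
}

func main() {
	in := v1alpha1CIDRs{"10.0.0.0/8", "fd00::/64"}
	out := convert(in)
	fmt.Println(out, &in[0] == &out[0]) // same backing array: true
}
```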
-func Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *v1alpha1.KubeProxyConfiguration, s conversion.Scope) error { - out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - out.BindAddress = in.BindAddress - out.HealthzBindAddress = in.HealthzBindAddress - out.MetricsBindAddress = in.MetricsBindAddress - out.BindAddressHardFail = in.BindAddressHardFail - out.EnableProfiling = in.EnableProfiling - out.ClusterCIDR = in.ClusterCIDR - out.HostnameOverride = in.HostnameOverride - if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { - return err - } - if err := Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { - return err - } - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = v1alpha1.ProxyMode(in.Mode) - out.PortRange = in.PortRange - if err := Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { - return err - } - out.ConfigSyncPeriod = in.ConfigSyncPeriod - out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses)) - if err := Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(&in.Winkernel, &out.Winkernel, s); err != nil { - return err - } - out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion - out.DetectLocalMode = v1alpha1.LocalMode(in.DetectLocalMode) - if err := Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(&in.DetectLocal, &out.DetectLocal, s); err != nil { - return err - } - return nil -} - -// Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function. -func Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *v1alpha1.KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in *v1alpha1.KubeProxyConntrackConfiguration, out *config.KubeProxyConntrackConfiguration, s conversion.Scope) error { - out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore)) - out.Min = (*int32)(unsafe.Pointer(in.Min)) - out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout)) - out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout)) - return nil -} - -// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration is an autogenerated conversion function. 
-func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in *v1alpha1.KubeProxyConntrackConfiguration, out *config.KubeProxyConntrackConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in, out, s) -} - -func autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *config.KubeProxyConntrackConfiguration, out *v1alpha1.KubeProxyConntrackConfiguration, s conversion.Scope) error { - out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore)) - out.Min = (*int32)(unsafe.Pointer(in.Min)) - out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout)) - out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout)) - return nil -} - -// Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function. -func Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *config.KubeProxyConntrackConfiguration, out *v1alpha1.KubeProxyConntrackConfiguration, s conversion.Scope) error { - return autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in *v1alpha1.KubeProxyIPTablesConfiguration, out *config.KubeProxyIPTablesConfiguration, s conversion.Scope) error { - out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit)) - out.MasqueradeAll = in.MasqueradeAll - out.LocalhostNodePorts = (*bool)(unsafe.Pointer(in.LocalhostNodePorts)) - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - return nil -} - -// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in *v1alpha1.KubeProxyIPTablesConfiguration, out *config.KubeProxyIPTablesConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in, out, s) -} - -func autoConvert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *config.KubeProxyIPTablesConfiguration, out *v1alpha1.KubeProxyIPTablesConfiguration, s conversion.Scope) error { - out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit)) - out.MasqueradeAll = in.MasqueradeAll - out.LocalhostNodePorts = (*bool)(unsafe.Pointer(in.LocalhostNodePorts)) - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - return nil -} - -// Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function. 
-func Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *config.KubeProxyIPTablesConfiguration, out *v1alpha1.KubeProxyIPTablesConfiguration, s conversion.Scope) error { - return autoConvert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in *v1alpha1.KubeProxyIPVSConfiguration, out *config.KubeProxyIPVSConfiguration, s conversion.Scope) error { - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - out.Scheduler = in.Scheduler - out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs)) - out.StrictARP = in.StrictARP - out.TCPTimeout = in.TCPTimeout - out.TCPFinTimeout = in.TCPFinTimeout - out.UDPTimeout = in.UDPTimeout - return nil -} - -// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in *v1alpha1.KubeProxyIPVSConfiguration, out *config.KubeProxyIPVSConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in, out, s) -} - -func autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *config.KubeProxyIPVSConfiguration, out *v1alpha1.KubeProxyIPVSConfiguration, s conversion.Scope) error { - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - out.Scheduler = in.Scheduler - out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs)) - out.StrictARP = in.StrictARP - out.TCPTimeout = in.TCPTimeout - out.TCPFinTimeout = in.TCPFinTimeout - out.UDPTimeout = in.UDPTimeout - return nil -} - -// Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function. -func Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *config.KubeProxyIPVSConfiguration, out *v1alpha1.KubeProxyIPVSConfiguration, s conversion.Scope) error { - return autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(in *v1alpha1.KubeProxyWinkernelConfiguration, out *config.KubeProxyWinkernelConfiguration, s conversion.Scope) error { - out.NetworkName = in.NetworkName - out.SourceVip = in.SourceVip - out.EnableDSR = in.EnableDSR - out.RootHnsEndpointName = in.RootHnsEndpointName - out.ForwardHealthCheckVip = in.ForwardHealthCheckVip - return nil -} - -// Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration is an autogenerated conversion function. 
-func Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(in *v1alpha1.KubeProxyWinkernelConfiguration, out *config.KubeProxyWinkernelConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(in, out, s) -} - -func autoConvert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(in *config.KubeProxyWinkernelConfiguration, out *v1alpha1.KubeProxyWinkernelConfiguration, s conversion.Scope) error { - out.NetworkName = in.NetworkName - out.SourceVip = in.SourceVip - out.EnableDSR = in.EnableDSR - out.RootHnsEndpointName = in.RootHnsEndpointName - out.ForwardHealthCheckVip = in.ForwardHealthCheckVip - return nil -} - -// Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration is an autogenerated conversion function. -func Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(in *config.KubeProxyWinkernelConfiguration, out *v1alpha1.KubeProxyWinkernelConfiguration, s conversion.Scope) error { - return autoConvert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(in, out, s) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 61f6555edfc5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.defaults.go deleted file mode 100644 index a4e7946ec04b..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/kube-proxy/config/v1alpha1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1alpha1.KubeProxyConfiguration{}, func(obj interface{}) { - SetObjectDefaults_KubeProxyConfiguration(obj.(*v1alpha1.KubeProxyConfiguration)) - }) - return nil -} - -func SetObjectDefaults_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration) { - SetDefaults_KubeProxyConfiguration(in) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go deleted file mode 100644 index 607a5928aaaa..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/validation/validation.go +++ /dev/null @@ -1,293 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "net" - "runtime" - "strconv" - "strings" - - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" - utilfeature "k8s.io/apiserver/pkg/util/feature" - componentbaseconfig "k8s.io/component-base/config" - "k8s.io/component-base/metrics" - apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" - kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - netutils "k8s.io/utils/net" -) - -// Validate validates the configuration of kube-proxy -func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList { - allErrs := field.ErrorList{} - - newPath := field.NewPath("KubeProxyConfiguration") - - effectiveFeatures := utilfeature.DefaultFeatureGate.DeepCopy() - if err := effectiveFeatures.SetFromMap(config.FeatureGates); err != nil { - allErrs = append(allErrs, field.Invalid(newPath.Child("featureGates"), config.FeatureGates, err.Error())) - } - - allErrs = append(allErrs, validateKubeProxyIPTablesConfiguration(config.IPTables, newPath.Child("KubeProxyIPTablesConfiguration"))...) - if config.Mode == kubeproxyconfig.ProxyModeIPVS { - allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...) - } - allErrs = append(allErrs, validateKubeProxyConntrackConfiguration(config.Conntrack, newPath.Child("KubeProxyConntrackConfiguration"))...) - allErrs = append(allErrs, validateProxyMode(config.Mode, newPath.Child("Mode"))...) - allErrs = append(allErrs, validateClientConnectionConfiguration(config.ClientConnection, newPath.Child("ClientConnection"))...) 
- - if config.OOMScoreAdj != nil && (*config.OOMScoreAdj < -1000 || *config.OOMScoreAdj > 1000) { - allErrs = append(allErrs, field.Invalid(newPath.Child("OOMScoreAdj"), *config.OOMScoreAdj, "must be within the range [-1000, 1000]")) - } - - if config.ConfigSyncPeriod.Duration <= 0 { - allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0")) - } - - if netutils.ParseIPSloppy(config.BindAddress) == nil { - allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address")) - } - - if config.HealthzBindAddress != "" { - allErrs = append(allErrs, validateHostPort(config.HealthzBindAddress, newPath.Child("HealthzBindAddress"))...) - } - allErrs = append(allErrs, validateHostPort(config.MetricsBindAddress, newPath.Child("MetricsBindAddress"))...) - - if config.ClusterCIDR != "" { - cidrs := strings.Split(config.ClusterCIDR, ",") - switch { - case len(cidrs) > 2: - allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "only one CIDR allowed or a valid DualStack CIDR (e.g. 10.100.0.0/16,fde4:8dba:82e1::/48)")) - // if DualStack and two cidrs validate if there is at least one of each IP family - case len(cidrs) == 2: - isDual, err := netutils.IsDualStackCIDRStrings(cidrs) - if err != nil || !isDual { - allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid DualStack CIDR (e.g. 10.100.0.0/16,fde4:8dba:82e1::/48)")) - } - // if we are here means that len(cidrs) == 1, we need to validate it - default: - if _, _, err := netutils.ParseCIDRSloppy(config.ClusterCIDR); err != nil { - allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)")) - } - } - } - - if _, err := utilnet.ParsePortRange(config.PortRange); err != nil { - allErrs = append(allErrs, field.Invalid(newPath.Child("PortRange"), config.PortRange, "must be a valid port range (e.g. 300-2000)")) - } - - allErrs = append(allErrs, validateKubeProxyNodePortAddress(config.NodePortAddresses, newPath.Child("NodePortAddresses"))...) - allErrs = append(allErrs, validateShowHiddenMetricsVersion(config.ShowHiddenMetricsForVersion, newPath.Child("ShowHiddenMetricsForVersion"))...) - if config.DetectLocalMode == kubeproxyconfig.LocalModeBridgeInterface { - allErrs = append(allErrs, validateInterface(config.DetectLocal.BridgeInterface, newPath.Child("InterfaceName"))...) - } - if config.DetectLocalMode == kubeproxyconfig.LocalModeInterfaceNamePrefix { - allErrs = append(allErrs, validateInterface(config.DetectLocal.InterfaceNamePrefix, newPath.Child("InterfacePrefix"))...) 
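The `ClusterCIDR` branch above accepts either one valid CIDR or exactly two CIDRs forming a dual-stack (one IPv4 + one IPv6) pair, delegating the two-CIDR case to `netutils.IsDualStackCIDRStrings`. A standalone sketch of the same rule — the `checkClusterCIDR` helper is illustrative, not from the patch:

```go
package main

import (
	"fmt"
	"strings"

	netutils "k8s.io/utils/net"
)

// checkClusterCIDR mirrors the deleted validation: one valid CIDR, or
// exactly two CIDRs forming a dual-stack pair.
func checkClusterCIDR(value string) error {
	cidrs := strings.Split(value, ",")
	switch {
	case len(cidrs) > 2:
		return fmt.Errorf("only one CIDR or a dual-stack pair allowed, got %d", len(cidrs))
	case len(cidrs) == 2:
		if dual, err := netutils.IsDualStackCIDRStrings(cidrs); err != nil || !dual {
			return fmt.Errorf("%q is not a valid dual-stack pair", value)
		}
	default:
		if _, _, err := netutils.ParseCIDRSloppy(value); err != nil {
			return fmt.Errorf("%q is not a valid CIDR: %v", value, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkClusterCIDR("10.100.0.0/16"))                     // <nil>
	fmt.Println(checkClusterCIDR("10.100.0.0/16,fde4:8dba:82e1::/48")) // <nil>
	fmt.Println(checkClusterCIDR("10.0.0.0/16,10.1.0.0/16"))           // error: not dual-stack
}
```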
- } - - return allErrs -} - -func validateKubeProxyIPTablesConfiguration(config kubeproxyconfig.KubeProxyIPTablesConfiguration, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if config.MasqueradeBit != nil && (*config.MasqueradeBit < 0 || *config.MasqueradeBit > 31) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("MasqueradeBit"), config.MasqueradeBit, "must be within the range [0, 31]")) - } - - if config.SyncPeriod.Duration <= 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0")) - } - - if config.MinSyncPeriod.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0")) - } - - if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration { - allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String()))) - } - - return allErrs -} - -func validateKubeProxyIPVSConfiguration(config kubeproxyconfig.KubeProxyIPVSConfiguration, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if config.SyncPeriod.Duration <= 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0")) - } - - if config.MinSyncPeriod.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0")) - } - - if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration { - allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String()))) - } - - allErrs = append(allErrs, validateIPVSTimeout(config, fldPath)...) - allErrs = append(allErrs, validateIPVSExcludeCIDRs(config.ExcludeCIDRs, fldPath.Child("ExcludeCidrs"))...) 
- - return allErrs -} - -func validateKubeProxyConntrackConfiguration(config kubeproxyconfig.KubeProxyConntrackConfiguration, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if config.MaxPerCore != nil && *config.MaxPerCore < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("MaxPerCore"), config.MaxPerCore, "must be greater than or equal to 0")) - } - - if config.Min != nil && *config.Min < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("Min"), config.Min, "must be greater than or equal to 0")) - } - - if config.TCPEstablishedTimeout.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPEstablishedTimeout"), config.TCPEstablishedTimeout, "must be greater than or equal to 0")) - } - - if config.TCPCloseWaitTimeout.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPCloseWaitTimeout"), config.TCPCloseWaitTimeout, "must be greater than or equal to 0")) - } - - return allErrs -} - -func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { - if runtime.GOOS == "windows" { - return validateProxyModeWindows(mode, fldPath) - } - - return validateProxyModeLinux(mode, fldPath) -} - -func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { - validModes := sets.NewString( - string(kubeproxyconfig.ProxyModeIPTables), - string(kubeproxyconfig.ProxyModeIPVS), - ) - - if mode == "" || validModes.Has(string(mode)) { - return nil - } - - errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(validModes.List(), ",")) - return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)} -} - -func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { - validModes := sets.NewString( - string(kubeproxyconfig.ProxyModeKernelspace), - ) - - if mode == "" || validModes.Has(string(mode)) { - return nil - } - - errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently 'kernelspace'])", strings.Join(validModes.List(), ",")) - return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)} -} - -func validateClientConnectionConfiguration(config componentbaseconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(config.Burst), fldPath.Child("Burst"))...) 
- return allErrs -} - -func validateHostPort(input string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - hostIP, port, err := net.SplitHostPort(input) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, input, "must be IP:port")) - return allErrs - } - - if ip := netutils.ParseIPSloppy(hostIP); ip == nil { - allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP")) - } - - if p, err := strconv.Atoi(port); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port")) - } else if p < 1 || p > 65535 { - allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port")) - } - - return allErrs -} - -func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i := range nodePortAddresses { - if _, _, err := netutils.ParseCIDRSloppy(nodePortAddresses[i]); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "must be a valid CIDR")) - } - } - - return allErrs -} - -func validateIPVSTimeout(config kubeproxyconfig.KubeProxyIPVSConfiguration, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if config.TCPTimeout.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPTimeout"), config.TCPTimeout, "must be greater than or equal to 0")) - } - - if config.TCPFinTimeout.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPFinTimeout"), config.TCPFinTimeout, "must be greater than or equal to 0")) - } - - if config.UDPTimeout.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("UDPTimeout"), config.UDPTimeout, "must be greater than or equal to 0")) - } - - return allErrs -} - -func validateIPVSExcludeCIDRs(excludeCIDRs []string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i := range excludeCIDRs { - if _, _, err := netutils.ParseCIDRSloppy(excludeCIDRs[i]); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i), excludeCIDRs[i], "must be a valid CIDR")) - } - } - return allErrs -} - -func validateShowHiddenMetricsVersion(version string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - errs := metrics.ValidateShowHiddenMetricsVersion(version) - for _, e := range errs { - allErrs = append(allErrs, field.Invalid(fldPath, version, e.Error())) - } - - return allErrs -} - -func validateInterface(iface string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(iface) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath, iface, "must not be empty")) - } - return allErrs -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go deleted file mode 100644 index fd83125fc2af..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go +++ /dev/null @@ -1,220 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package config - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) { - { - in := &in - *out = make(ConfigurationMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap. -func (in ConfigurationMap) DeepCopy() ConfigurationMap { - if in == nil { - return nil - } - out := new(ConfigurationMap) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DetectLocalConfiguration) DeepCopyInto(out *DetectLocalConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectLocalConfiguration. -func (in *DetectLocalConfiguration) DeepCopy() *DetectLocalConfiguration { - if in == nil { - return nil - } - out := new(DetectLocalConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.FeatureGates != nil { - in, out := &in.FeatureGates, &out.FeatureGates - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.ClientConnection = in.ClientConnection - in.IPTables.DeepCopyInto(&out.IPTables) - in.IPVS.DeepCopyInto(&out.IPVS) - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = **in - } - in.Conntrack.DeepCopyInto(&out.Conntrack) - out.ConfigSyncPeriod = in.ConfigSyncPeriod - if in.NodePortAddresses != nil { - in, out := &in.NodePortAddresses, &out.NodePortAddresses - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.Winkernel = in.Winkernel - out.DetectLocal = in.DetectLocal - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. -func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { - *out = *in - if in.MaxPerCore != nil { - in, out := &in.MaxPerCore, &out.MaxPerCore - *out = new(int32) - **out = **in - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = new(int32) - **out = **in - } - if in.TCPEstablishedTimeout != nil { - in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout - *out = new(v1.Duration) - **out = **in - } - if in.TCPCloseWaitTimeout != nil { - in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout - *out = new(v1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. -func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyConntrackConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { - *out = *in - if in.MasqueradeBit != nil { - in, out := &in.MasqueradeBit, &out.MasqueradeBit - *out = new(int32) - **out = **in - } - if in.LocalhostNodePorts != nil { - in, out := &in.LocalhostNodePorts, &out.LocalhostNodePorts - *out = new(bool) - **out = **in - } - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. -func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyIPTablesConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { - *out = *in - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - if in.ExcludeCIDRs != nil { - in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.TCPTimeout = in.TCPTimeout - out.TCPFinTimeout = in.TCPFinTimeout - out.UDPTimeout = in.UDPTimeout - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. -func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyIPVSConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyWinkernelConfiguration) DeepCopyInto(out *KubeProxyWinkernelConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyWinkernelConfiguration. 
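All of the generated helpers being deleted here follow one mechanical pattern: DeepCopyInto starts with a shallow struct assignment, then re-allocates every pointer, map, and slice field so the copy shares no memory with the original, and DeepCopy is a thin allocating wrapper around it. A hand-written sketch of that pattern on a hypothetical one-field struct:

```go
package main

import "fmt"

// Config is a stand-in for the real configuration types above.
type Config struct {
	MasqueradeBit *int32
}

// DeepCopyInto shallow-copies the struct, then replaces the pointer field
// with a fresh allocation, exactly the shape deepcopy-gen emits.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	if in.MasqueradeBit != nil {
		in, out := &in.MasqueradeBit, &out.MasqueradeBit
		*out = new(int32)
		**out = **in
	}
}

// DeepCopy allocates a new Config and delegates to DeepCopyInto.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}

func main() {
	bit := int32(14)
	a := &Config{MasqueradeBit: &bit}
	b := a.DeepCopy()
	*b.MasqueradeBit = 15
	fmt.Println(*a.MasqueradeBit, *b.MasqueradeBit) // 14 15 (no shared memory)
}
```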
-func (in *KubeProxyWinkernelConfiguration) DeepCopy() *KubeProxyWinkernelConfiguration { - if in == nil { - return nil - } - out := new(KubeProxyWinkernelConfiguration) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/well_known_labels.go deleted file mode 100644 index 84c4b9fa1589..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/well_known_labels.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -const ( - // LabelServiceProxyName indicates that an alternative service - // proxy will implement this Service. - LabelServiceProxyName = "service.kubernetes.io/service-proxy-name" -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/OWNERS index f5f691a16621..46a45ce4e769 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/OWNERS @@ -2,7 +2,6 @@ reviewers: - sig-network-reviewers - - lavalamp - smarterclayton labels: - sig/network diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/conntrack/cleanup.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/conntrack/cleanup.go new file mode 100644 index 000000000000..ab5dc7df8976 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/conntrack/cleanup.go @@ -0,0 +1,111 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conntrack + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/proxy" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" + utilexec "k8s.io/utils/exec" +) + +// CleanStaleEntries takes care of flushing stale conntrack entries for services and endpoints. +func CleanStaleEntries(isIPv6 bool, exec utilexec.Interface, svcPortMap proxy.ServicePortMap, + serviceUpdateResult proxy.UpdateServiceMapResult, endpointUpdateResult proxy.UpdateEndpointMapResult) { + + deleteStaleServiceConntrackEntries(isIPv6, exec, svcPortMap, serviceUpdateResult, endpointUpdateResult) + deleteStaleEndpointConntrackEntries(exec, svcPortMap, endpointUpdateResult) +} + +// deleteStaleServiceConntrackEntries takes care of flushing stale conntrack entries related +// to UDP Service IPs. 
When a service has no endpoints and we drop traffic to it, conntrack +// may create "black hole" entries for that IP+port. When the service gets endpoints we +// need to delete those entries so further traffic doesn't get dropped. +func deleteStaleServiceConntrackEntries(isIPv6 bool, exec utilexec.Interface, svcPortMap proxy.ServicePortMap, serviceUpdateResult proxy.UpdateServiceMapResult, endpointUpdateResult proxy.UpdateEndpointMapResult) { + conntrackCleanupServiceIPs := serviceUpdateResult.DeletedUDPClusterIPs + conntrackCleanupServiceNodePorts := sets.New[int]() + + // merge newly active services gathered from updateEndpointsMap + // a UDP service that changes from 0 to non-0 endpoints is newly active. + for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices { + if svcInfo, ok := svcPortMap[svcPortName]; ok { + klog.V(4).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName) + conntrackCleanupServiceIPs.Insert(svcInfo.ClusterIP().String()) + for _, extIP := range svcInfo.ExternalIPStrings() { + conntrackCleanupServiceIPs.Insert(extIP) + } + for _, lbIP := range svcInfo.LoadBalancerIPStrings() { + conntrackCleanupServiceIPs.Insert(lbIP) + } + nodePort := svcInfo.NodePort() + if svcInfo.Protocol() == v1.ProtocolUDP && nodePort != 0 { + conntrackCleanupServiceNodePorts.Insert(nodePort) + } + } + } + + klog.V(4).InfoS("Deleting conntrack stale entries for services", "IPs", conntrackCleanupServiceIPs.UnsortedList()) + for _, svcIP := range conntrackCleanupServiceIPs.UnsortedList() { + if err := ClearEntriesForIP(exec, svcIP, v1.ProtocolUDP); err != nil { + klog.ErrorS(err, "Failed to delete stale service connections", "IP", svcIP) + } + } + klog.V(4).InfoS("Deleting conntrack stale entries for services", "nodePorts", conntrackCleanupServiceNodePorts.UnsortedList()) + for _, nodePort := range conntrackCleanupServiceNodePorts.UnsortedList() { + err := ClearEntriesForPort(exec, nodePort, isIPv6, v1.ProtocolUDP) + if err != nil { + klog.ErrorS(err, "Failed to clear udp conntrack", "nodePort", nodePort) + } + } +} + +// deleteStaleEndpointConntrackEntries takes care of flushing stale conntrack entries related +// to UDP endpoints. After a UDP endpoint is removed we must flush any conntrack entries +// for it so that if the same client keeps sending, the packets will get routed to a new endpoint. 
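Both cleanup paths in this new file ultimately shell out to the conntrack binary through the ClearEntriesFor* helpers (the Exec wrapper they share appears in the conntrack.go rename below). Roughly what flushing stale UDP flows for a single service IP amounts to, sketched with os/exec; the exact flags the real helpers pass are not shown in this hunk, so treat them as illustrative:

```go
package main

import (
	"fmt"
	"os/exec"
)

// clearUDPEntriesForIP deletes conntrack flows whose original destination
// was the given (now stale) service IP. Flags are illustrative.
func clearUDPEntriesForIP(ip string) error {
	out, err := exec.Command("conntrack", "-D", "--orig-dst", ip, "-p", "udp").CombinedOutput()
	if err != nil {
		// Note: conntrack exits non-zero when no flows matched, so a real
		// caller would distinguish "nothing to delete" from a hard failure.
		return fmt.Errorf("conntrack returned %q: %w", string(out), err)
	}
	return nil
}

func main() {
	// Hypothetical cluster IP of a UDP service that just gained endpoints.
	fmt.Println(clearUDPEntriesForIP("10.96.0.53"))
}
```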
+func deleteStaleEndpointConntrackEntries(exec utilexec.Interface, svcPortMap proxy.ServicePortMap, endpointUpdateResult proxy.UpdateEndpointMapResult) { + for _, epSvcPair := range endpointUpdateResult.DeletedUDPEndpoints { + if svcInfo, ok := svcPortMap[epSvcPair.ServicePortName]; ok { + endpointIP := proxyutil.IPPart(epSvcPair.Endpoint) + nodePort := svcInfo.NodePort() + var err error + if nodePort != 0 { + err = ClearEntriesForPortNAT(exec, endpointIP, nodePort, v1.ProtocolUDP) + if err != nil { + klog.ErrorS(err, "Failed to delete nodeport-related endpoint connections", "servicePortName", epSvcPair.ServicePortName) + } + } + err = ClearEntriesForNAT(exec, svcInfo.ClusterIP().String(), endpointIP, v1.ProtocolUDP) + if err != nil { + klog.ErrorS(err, "Failed to delete endpoint connections", "servicePortName", epSvcPair.ServicePortName) + } + for _, extIP := range svcInfo.ExternalIPStrings() { + err := ClearEntriesForNAT(exec, extIP, endpointIP, v1.ProtocolUDP) + if err != nil { + klog.ErrorS(err, "Failed to delete endpoint connections for externalIP", "servicePortName", epSvcPair.ServicePortName, "externalIP", extIP) + } + } + for _, lbIP := range svcInfo.LoadBalancerIPStrings() { + err := ClearEntriesForNAT(exec, lbIP, endpointIP, v1.ProtocolUDP) + if err != nil { + klog.ErrorS(err, "Failed to delete endpoint connections for LoadBalancerIP", "servicePortName", epSvcPair.ServicePortName, "loadBalancerIP", lbIP) + } + } + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/conntrack/conntrack.go similarity index 97% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/conntrack/conntrack.go index 4b39f61ac79e..450ca1dda3a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/conntrack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/conntrack/conntrack.go @@ -63,12 +63,12 @@ func Exec(execer exec.Interface, parameters ...string) error { if err != nil { return fmt.Errorf("error looking for path of conntrack: %v", err) } - klog.V(4).Infof("Clearing conntrack entries %v", parameters) + klog.V(4).InfoS("Clearing conntrack entries", "parameters", parameters) output, err := execer.Command(conntrackPath, parameters...).CombinedOutput() if err != nil { return fmt.Errorf("conntrack command returned: %q, error message: %s", string(output), err) } - klog.V(4).Infof("Conntrack entries deleted %s", string(output)) + klog.V(4).InfoS("Conntrack entries deleted", "output", string(output)) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go index 3cecb33226cf..fd1e9485cdb6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go @@ -30,10 +30,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/proxy/metrics" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" ) -var supportedEndpointSliceAddressTypes = sets.NewString( +var supportedEndpointSliceAddressTypes = sets.New[string]( string(discovery.AddressTypeIPv4), string(discovery.AddressTypeIPv6), ) @@ -49,7 +49,7 @@ type BaseEndpointInfo struct { // ZoneHints represent the zone hints for the endpoint. 
This is based on // endpoint.hints.forZones[*].name in the EndpointSlice API. - ZoneHints sets.String + ZoneHints sets.Set[string] // Ready indicates whether this endpoint is ready and NOT terminating. // For pods, this is true if a pod has a ready status and a nil deletion timestamp. // This is only set when watching EndpointSlices. If using Endpoints, this is always @@ -103,18 +103,18 @@ func (info *BaseEndpointInfo) IsTerminating() bool { } // GetZoneHints returns the zone hint for the endpoint. -func (info *BaseEndpointInfo) GetZoneHints() sets.String { +func (info *BaseEndpointInfo) GetZoneHints() sets.Set[string] { return info.ZoneHints } // IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface. func (info *BaseEndpointInfo) IP() string { - return utilproxy.IPPart(info.Endpoint) + return proxyutil.IPPart(info.Endpoint) } // Port returns just the Port part of the endpoint. func (info *BaseEndpointInfo) Port() (int, error) { - return utilproxy.PortPart(info.Endpoint) + return proxyutil.PortPart(info.Endpoint) } // Equal is part of proxy.Endpoint interface. @@ -135,7 +135,7 @@ func (info *BaseEndpointInfo) GetZone() string { } func newBaseEndpointInfo(IP, nodeName, zone string, port int, isLocal bool, - ready, serving, terminating bool, zoneHints sets.String) *BaseEndpointInfo { + ready, serving, terminating bool, zoneHints sets.Set[string]) *BaseEndpointInfo { return &BaseEndpointInfo{ Endpoint: net.JoinHostPort(IP, strconv.Itoa(port)), IsLocal: isLocal, @@ -232,7 +232,7 @@ func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.E // PendingChanges returns a set whose keys are the names of the services whose endpoints // have changed since the last time ect was used to update an EndpointsMap. (You must call // this _before_ calling em.Update(ect).) -func (ect *EndpointChangeTracker) PendingChanges() sets.String { +func (ect *EndpointChangeTracker) PendingChanges() sets.Set[string] { return ect.endpointSliceCache.pendingChanges() } @@ -361,8 +361,8 @@ func (em EndpointsMap) unmerge(other EndpointsMap) { } // getLocalEndpointIPs returns endpoints IPs if given endpoint is local - local means the endpoint is running in same host as kube-proxy. -func (em EndpointsMap) getLocalReadyEndpointIPs() map[types.NamespacedName]sets.String { - localIPs := make(map[types.NamespacedName]sets.String) +func (em EndpointsMap) getLocalReadyEndpointIPs() map[types.NamespacedName]sets.Set[string] { + localIPs := make(map[types.NamespacedName]sets.Set[string]) for svcPortName, epList := range em { for _, ep := range epList { // Only add ready endpoints for health checking. Terminating endpoints may still serve traffic @@ -374,7 +374,7 @@ func (em EndpointsMap) getLocalReadyEndpointIPs() map[types.NamespacedName]sets. 
if ep.GetIsLocal() { nsn := svcPortName.NamespacedName if localIPs[nsn] == nil { - localIPs[nsn] = sets.NewString() + localIPs[nsn] = sets.New[string]() } localIPs[nsn].Insert(ep.IP()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go index 6f00982d4e22..ffbab9913c4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpointslicecache.go @@ -31,7 +31,7 @@ import ( "k8s.io/client-go/tools/events" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/features" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" utilnet "k8s.io/utils/net" ) @@ -81,7 +81,7 @@ type endpointInfo struct { Addresses []string NodeName *string Zone *string - ZoneHints sets.String + ZoneHints sets.Set[string] Ready bool Serving bool @@ -141,7 +141,7 @@ func newEndpointSliceInfo(endpointSlice *discovery.EndpointSlice, remove bool) * if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) { if endpoint.Hints != nil && len(endpoint.Hints.ForZones) > 0 { - epInfo.ZoneHints = sets.String{} + epInfo.ZoneHints = sets.New[string]() for _, zone := range endpoint.Hints.ForZones { epInfo.ZoneHints.Insert(zone.Name) } @@ -190,11 +190,11 @@ func (cache *EndpointSliceCache) updatePending(endpointSlice *discovery.Endpoint // pendingChanges returns a set whose keys are the names of the services whose endpoints // have changed since the last time checkoutChanges was called -func (cache *EndpointSliceCache) pendingChanges() sets.String { +func (cache *EndpointSliceCache) pendingChanges() sets.Set[string] { cache.lock.Lock() defer cache.lock.Unlock() - changes := sets.NewString() + changes := sets.New[string]() for serviceNN, esTracker := range cache.trackerByServiceMap { if len(esTracker.pending) > 0 { changes.Insert(serviceNN.String()) @@ -290,7 +290,7 @@ func (cache *EndpointSliceCache) addEndpoints(svcPortName *ServicePortName, port if (cache.ipFamily == v1.IPv6Protocol) != utilnet.IsIPv6String(endpoint.Addresses[0]) { // Emit event on the corresponding service which had a different IP // version than the endpoint. 
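A change running through these hunks is the migration from the typed sets.String to the generics-based API in k8s.io/apimachinery/pkg/util/sets. Usage is near-identical:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old style: sets.NewString("zone-a"); new style: sets.New[string]("zone-a").
	zones := sets.New[string]("zone-a")
	zones.Insert("zone-b")

	fmt.Println(zones.Has("zone-a"))  // true
	fmt.Println(zones.UnsortedList()) // both zones, in no particular order
	fmt.Println(sets.List(zones))     // sorted slice: [zone-a zone-b]
}
```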
- utilproxy.LogAndEmitIncorrectIPVersionEvent(cache.recorder, "endpointslice", endpoint.Addresses[0], svcPortName.NamespacedName.Namespace, svcPortName.NamespacedName.Name, "") + proxyutil.LogAndEmitIncorrectIPVersionEvent(cache.recorder, "endpointslice", endpoint.Addresses[0], svcPortName.NamespacedName.Namespace, svcPortName.NamespacedName.Name, "") continue } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go index a6f2afe765d4..edea50de78ae 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/service_health.go @@ -24,16 +24,14 @@ import ( "sync" "github.com/lithammer/dedent" - v1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/events" + "k8s.io/klog/v2" api "k8s.io/kubernetes/pkg/apis/core" - - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" ) // ServiceHealthServer serves HTTP endpoints for each service name, with results @@ -58,21 +56,17 @@ type proxierHealthChecker interface { IsHealthy() bool } -func newServiceHealthServer(hostname string, recorder events.EventRecorder, listener listener, factory httpServerFactory, nodePortAddresses *utilproxy.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer { - - nodeAddresses, err := nodePortAddresses.GetNodeAddresses(utilproxy.RealNetwork{}) - if err != nil || nodeAddresses.Len() == 0 { - klog.ErrorS(err, "Failed to get node ip address matching node port addresses, health check port will listen to all node addresses", "nodePortAddresses", nodePortAddresses) - nodeAddresses = sets.NewString() - nodeAddresses.Insert(utilproxy.IPv4ZeroCIDR) - } - - // if any of the addresses is zero cidr then we listen - // to old style : - for _, addr := range nodeAddresses.List() { - if utilproxy.IsZeroCIDR(addr) { - nodeAddresses = sets.NewString("") - break +func newServiceHealthServer(hostname string, recorder events.EventRecorder, listener listener, factory httpServerFactory, nodePortAddresses *proxyutil.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer { + // It doesn't matter whether we listen on "0.0.0.0", "::", or ""; go + // treats them all the same. 
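The rewritten constructor leans on a Go networking detail called out in the new comment: an empty host, "0.0.0.0", and "::" all produce a wildcard listener, which is why a single net.IPv4zero default suffices. A quick way to confirm:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// All three bind the wildcard address; port 0 picks a free port.
	// (The "[::]:0" case may fail on an IPv4-only host.)
	for _, addr := range []string{":0", "0.0.0.0:0", "[::]:0"} {
		ln, err := net.Listen("tcp", addr)
		if err != nil {
			fmt.Println(addr, "->", err)
			continue
		}
		fmt.Printf("%-12s listens on %s\n", addr, ln.Addr())
		ln.Close()
	}
}
```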
+ nodeIPs := []net.IP{net.IPv4zero} + + if !nodePortAddresses.MatchAll() { + ips, err := nodePortAddresses.GetNodeIPs(proxyutil.RealNetwork{}) + if err == nil { + nodeIPs = ips + } else { + klog.ErrorS(err, "Failed to get node ip address matching node port addresses, health check port will listen to all node addresses", "nodePortAddresses", nodePortAddresses) } } @@ -83,22 +77,22 @@ func newServiceHealthServer(hostname string, recorder events.EventRecorder, list httpFactory: factory, healthzServer: healthzServer, services: map[types.NamespacedName]*hcInstance{}, - nodeAddresses: nodeAddresses, + nodeIPs: nodeIPs, } } // NewServiceHealthServer allocates a new service healthcheck server manager -func NewServiceHealthServer(hostname string, recorder events.EventRecorder, nodePortAddresses *utilproxy.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer { +func NewServiceHealthServer(hostname string, recorder events.EventRecorder, nodePortAddresses *proxyutil.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer { return newServiceHealthServer(hostname, recorder, stdNetListener{}, stdHTTPServerFactory{}, nodePortAddresses, healthzServer) } type server struct { hostname string // node addresses where health check port will listen on - nodeAddresses sets.String - recorder events.EventRecorder // can be nil - listener listener - httpFactory httpServerFactory + nodeIPs []net.IP + recorder events.EventRecorder // can be nil + listener listener + httpFactory httpServerFactory healthzServer proxierHealthChecker @@ -169,12 +163,11 @@ func (hcI *hcInstance) listenAndServeAll(hcs *server) error { var err error var listener net.Listener - addresses := hcs.nodeAddresses.List() - hcI.httpServers = make([]httpServer, 0, len(addresses)) + hcI.httpServers = make([]httpServer, 0, len(hcs.nodeIPs)) // for each of the node addresses start listening and serving - for _, address := range addresses { - addr := net.JoinHostPort(address, fmt.Sprint(hcI.port)) + for _, ip := range hcs.nodeIPs { + addr := net.JoinHostPort(ip.String(), fmt.Sprint(hcI.port)) // create http server httpSrv := hcs.httpFactory.New(addr, hcHandler{name: hcI.nsn, hcs: hcs}) // start listener @@ -235,8 +228,8 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { return } count := svc.endpoints - kubeProxyHealthy := h.hcs.healthzServer.IsHealthy() h.hcs.lock.RUnlock() + kubeProxyHealthy := h.hcs.healthzServer.IsHealthy() resp.Header().Set("Content-Type", "application/json") resp.Header().Set("X-Content-Type-Options", "nosniff") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/OWNERS deleted file mode 100644 index 5368499adb22..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - sig-network-reviewers - - smarterclayton - - justinsb -labels: - - sig/network diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go deleted file mode 100644 index 937441ecaac4..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ /dev/null @@ -1,1687 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package iptables - -// -// NOTE: this needs to be tested in e2e since it uses iptables for everything. -// - -import ( - "bytes" - "crypto/sha256" - "encoding/base32" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/tools/events" - utilsysctl "k8s.io/component-helpers/node/util/sysctl" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/proxy/healthcheck" - "k8s.io/kubernetes/pkg/proxy/metaproxier" - "k8s.io/kubernetes/pkg/proxy/metrics" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" - proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables" - "k8s.io/kubernetes/pkg/util/async" - "k8s.io/kubernetes/pkg/util/conntrack" - utiliptables "k8s.io/kubernetes/pkg/util/iptables" - utilexec "k8s.io/utils/exec" - netutils "k8s.io/utils/net" -) - -const ( - // the services chain - kubeServicesChain utiliptables.Chain = "KUBE-SERVICES" - - // the external services chain - kubeExternalServicesChain utiliptables.Chain = "KUBE-EXTERNAL-SERVICES" - - // the nodeports chain - kubeNodePortsChain utiliptables.Chain = "KUBE-NODEPORTS" - - // the kubernetes postrouting chain - kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING" - - // kubeMarkMasqChain is the mark-for-masquerade chain - kubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ" - - // the kubernetes forward chain - kubeForwardChain utiliptables.Chain = "KUBE-FORWARD" - - // kubeProxyFirewallChain is the kube-proxy firewall chain - kubeProxyFirewallChain utiliptables.Chain = "KUBE-PROXY-FIREWALL" - - // kube proxy canary chain is used for monitoring rule reload - kubeProxyCanaryChain utiliptables.Chain = "KUBE-PROXY-CANARY" - - // kubeletFirewallChain is a duplicate of kubelet's firewall containing - // the anti-martian-packet rule. It should not be used for any other - // rules. - kubeletFirewallChain utiliptables.Chain = "KUBE-FIREWALL" - - // largeClusterEndpointsThreshold is the number of endpoints at which - // we switch into "large cluster mode" and optimize for iptables - // performance over iptables debuggability - largeClusterEndpointsThreshold = 1000 -) - -const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet" -const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables" - -// internal struct for string service information -type servicePortInfo struct { - *proxy.BaseServicePortInfo - // The following fields are computed and stored for performance reasons. 
- nameString string - clusterPolicyChainName utiliptables.Chain - localPolicyChainName utiliptables.Chain - firewallChainName utiliptables.Chain - externalChainName utiliptables.Chain -} - -// returns a new proxy.ServicePort which abstracts a serviceInfo -func newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *proxy.BaseServicePortInfo) proxy.ServicePort { - svcPort := &servicePortInfo{BaseServicePortInfo: bsvcPortInfo} - - // Store the following for performance reasons. - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: port.Name} - protocol := strings.ToLower(string(svcPort.Protocol())) - svcPort.nameString = svcPortName.String() - svcPort.clusterPolicyChainName = servicePortPolicyClusterChain(svcPort.nameString, protocol) - svcPort.localPolicyChainName = servicePortPolicyLocalChainName(svcPort.nameString, protocol) - svcPort.firewallChainName = serviceFirewallChainName(svcPort.nameString, protocol) - svcPort.externalChainName = serviceExternalChainName(svcPort.nameString, protocol) - - return svcPort -} - -// internal struct for endpoints information -type endpointsInfo struct { - *proxy.BaseEndpointInfo - - ChainName utiliptables.Chain -} - -// returns a new proxy.Endpoint which abstracts a endpointsInfo -func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo, svcPortName *proxy.ServicePortName) proxy.Endpoint { - return &endpointsInfo{ - BaseEndpointInfo: baseInfo, - ChainName: servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(svcPortName.Protocol)), baseInfo.Endpoint), - } -} - -// Equal overrides the Equal() function implemented by proxy.BaseEndpointInfo. -func (e *endpointsInfo) Equal(other proxy.Endpoint) bool { - o, ok := other.(*endpointsInfo) - if !ok { - klog.ErrorS(nil, "Failed to cast endpointsInfo") - return false - } - return e.Endpoint == o.Endpoint && - e.IsLocal == o.IsLocal && - e.ChainName == o.ChainName && - e.Ready == o.Ready -} - -// Proxier is an iptables based proxy for connections between a localhost:lport -// and services that provide the actual backends. -type Proxier struct { - // endpointsChanges and serviceChanges contains all changes to endpoints and - // services that happened since iptables was synced. For a single object, - // changes are accumulated, i.e. previous is state from before all of them, - // current is state after applying all of those. - endpointsChanges *proxy.EndpointChangeTracker - serviceChanges *proxy.ServiceChangeTracker - - mu sync.Mutex // protects the following fields - svcPortMap proxy.ServicePortMap - endpointsMap proxy.EndpointsMap - nodeLabels map[string]string - // endpointSlicesSynced, and servicesSynced are set to true - // when corresponding objects are synced after startup. This is used to avoid - // updating iptables with some partial data after kube-proxy restart. - endpointSlicesSynced bool - servicesSynced bool - needFullSync bool - initialized int32 - syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules - syncPeriod time.Duration - lastIPTablesCleanup time.Time - - // These are effectively const and do not need the mutex to be held. 
- iptables utiliptables.Interface - masqueradeAll bool - masqueradeMark string - exec utilexec.Interface - localDetector proxyutiliptables.LocalTrafficDetector - hostname string - nodeIP net.IP - recorder events.EventRecorder - - serviceHealthServer healthcheck.ServiceHealthServer - healthzServer healthcheck.ProxierHealthUpdater - - // Since converting probabilities (floats) to strings is expensive - // and we are using only probabilities in the format of 1/n, we are - // precomputing some number of those and cache for future reuse. - precomputedProbabilities []string - - // The following buffers are used to reuse memory and avoid allocations - // that are significantly impacting performance. - iptablesData *bytes.Buffer - existingFilterChainsData *bytes.Buffer - filterChains utilproxy.LineBuffer - filterRules utilproxy.LineBuffer - natChains utilproxy.LineBuffer - natRules utilproxy.LineBuffer - - // largeClusterMode is set at the beginning of syncProxyRules if we are - // going to end up outputting "lots" of iptables rules and so we need to - // optimize for performance over debuggability. - largeClusterMode bool - - // localhostNodePorts indicates whether we allow NodePort services to be accessed - // via localhost. - localhostNodePorts bool - // nodePortAddresses selects the interfaces where nodePort works. - nodePortAddresses *utilproxy.NodePortAddresses - // networkInterfacer defines an interface for several net library functions. - // Inject for test purpose. - networkInterfacer utilproxy.NetworkInterfacer -} - -// Proxier implements proxy.Provider -var _ proxy.Provider = &Proxier{} - -// NewProxier returns a new Proxier given an iptables Interface instance. -// Because of the iptables logic, it is assumed that there is only a single Proxier active on a machine. -// An error will be returned if iptables fails to update or acquire the initial lock. -// Once a proxier is created, it will keep iptables up to date in the background and -// will not terminate if a particular iptables call fails. -func NewProxier(ipFamily v1.IPFamily, - ipt utiliptables.Interface, - sysctl utilsysctl.Interface, - exec utilexec.Interface, - syncPeriod time.Duration, - minSyncPeriod time.Duration, - masqueradeAll bool, - localhostNodePorts bool, - masqueradeBit int, - localDetector proxyutiliptables.LocalTrafficDetector, - hostname string, - nodeIP net.IP, - recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, - nodePortAddressStrings []string, -) (*Proxier, error) { - nodePortAddresses := utilproxy.NewNodePortAddresses(nodePortAddressStrings) - - if !nodePortAddresses.ContainsIPv4Loopback() { - localhostNodePorts = false - } - if localhostNodePorts { - // Set the route_localnet sysctl we need for exposing NodePorts on loopback addresses - // Refer to https://issues.k8s.io/90259 - klog.InfoS("Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses") - if err := utilproxy.EnsureSysctl(sysctl, sysctlRouteLocalnet, 1); err != nil { - return nil, err - } - } - - // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers - // are connected to a Linux bridge (but not SDN bridges). 
Until most - // plugins handle this, log when config is missing - if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - klog.InfoS("Missing br-netfilter module or unset sysctl br-nf-call-iptables, proxy may not work as intended") - } - - // Generate the masquerade mark to use for SNAT rules. - masqueradeValue := 1 << uint(masqueradeBit) - masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) - klog.V(2).InfoS("Using iptables mark for masquerade", "ipFamily", ipt.Protocol(), "mark", masqueradeMark) - - serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) - - proxier := &Proxier{ - svcPortMap: make(proxy.ServicePortMap), - serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil), - endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(hostname, newEndpointInfo, ipFamily, recorder, nil), - needFullSync: true, - syncPeriod: syncPeriod, - iptables: ipt, - masqueradeAll: masqueradeAll, - masqueradeMark: masqueradeMark, - exec: exec, - localDetector: localDetector, - hostname: hostname, - nodeIP: nodeIP, - recorder: recorder, - serviceHealthServer: serviceHealthServer, - healthzServer: healthzServer, - precomputedProbabilities: make([]string, 0, 1001), - iptablesData: bytes.NewBuffer(nil), - existingFilterChainsData: bytes.NewBuffer(nil), - filterChains: utilproxy.LineBuffer{}, - filterRules: utilproxy.LineBuffer{}, - natChains: utilproxy.LineBuffer{}, - natRules: utilproxy.LineBuffer{}, - localhostNodePorts: localhostNodePorts, - nodePortAddresses: nodePortAddresses, - networkInterfacer: utilproxy.RealNetwork{}, - } - - burstSyncs := 2 - klog.V(2).InfoS("Iptables sync params", "ipFamily", ipt.Protocol(), "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs) - // We pass syncPeriod to ipt.Monitor, which will call us only if it needs to. - // We need to pass *some* maxInterval to NewBoundedFrequencyRunner anyway though. - // time.Hour is arbitrary. - proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, time.Hour, burstSyncs) - - go ipt.Monitor(kubeProxyCanaryChain, []utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter}, - proxier.forceSyncProxyRules, syncPeriod, wait.NeverStop) - - if ipt.HasRandomFully() { - klog.V(2).InfoS("Iptables supports --random-fully", "ipFamily", ipt.Protocol()) - } else { - klog.V(2).InfoS("Iptables does not support --random-fully", "ipFamily", ipt.Protocol()) - } - - return proxier, nil -} - -// NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies. 
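The masquerade mark computed in NewProxier above packs a single configurable bit position into the fwmark string used by the SNAT rules. For the default bit 14 the arithmetic is:

```go
package main

import "fmt"

func main() {
	masqueradeBit := 14 // kube-proxy's default --iptables-masquerade-bit
	masqueradeValue := 1 << uint(masqueradeBit)
	// %#08x pads to eight characters including the 0x prefix.
	fmt.Printf("%#08x\n", masqueradeValue) // 0x004000
}
```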
-func NewDualStackProxier( - ipt [2]utiliptables.Interface, - sysctl utilsysctl.Interface, - exec utilexec.Interface, - syncPeriod time.Duration, - minSyncPeriod time.Duration, - masqueradeAll bool, - localhostNodePorts bool, - masqueradeBit int, - localDetectors [2]proxyutiliptables.LocalTrafficDetector, - hostname string, - nodeIP [2]net.IP, - recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, - nodePortAddresses []string, -) (proxy.Provider, error) { - // Create an ipv4 instance of the single-stack proxier - ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses) - ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], sysctl, - exec, syncPeriod, minSyncPeriod, masqueradeAll, localhostNodePorts, masqueradeBit, localDetectors[0], hostname, - nodeIP[0], recorder, healthzServer, ipFamilyMap[v1.IPv4Protocol]) - if err != nil { - return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) - } - - ipv6Proxier, err := NewProxier(v1.IPv6Protocol, ipt[1], sysctl, - exec, syncPeriod, minSyncPeriod, masqueradeAll, false, masqueradeBit, localDetectors[1], hostname, - nodeIP[1], recorder, healthzServer, ipFamilyMap[v1.IPv6Protocol]) - if err != nil { - return nil, fmt.Errorf("unable to create ipv6 proxier: %v", err) - } - return metaproxier.NewMetaProxier(ipv4Proxier, ipv6Proxier), nil -} - -type iptablesJumpChain struct { - table utiliptables.Table - dstChain utiliptables.Chain - srcChain utiliptables.Chain - comment string - extraArgs []string -} - -var iptablesJumpChains = []iptablesJumpChain{ - {utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainInput, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainForward, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeNodePortsChain, utiliptables.ChainInput, "kubernetes health check service ports", nil}, - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainForward, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, - {utiliptables.TableFilter, kubeProxyFirewallChain, utiliptables.ChainInput, "kubernetes load balancer firewall", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeProxyFirewallChain, utiliptables.ChainOutput, "kubernetes load balancer firewall", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeProxyFirewallChain, utiliptables.ChainForward, "kubernetes load balancer firewall", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil}, - {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainPrerouting, "kubernetes service portals", nil}, -} - -// Duplicates of chains created in pkg/kubelet/kubelet_network_linux.go; we create these -// on startup but do not delete them in CleanupLeftovers. 
-var iptablesKubeletJumpChains = []iptablesJumpChain{ - {utiliptables.TableFilter, kubeletFirewallChain, utiliptables.ChainInput, "", nil}, - {utiliptables.TableFilter, kubeletFirewallChain, utiliptables.ChainOutput, "", nil}, - - // Move this to iptablesJumpChains once IPTablesOwnershipCleanup is GA and kubelet - // no longer creates this chain, - {utiliptables.TableNAT, kubePostroutingChain, utiliptables.ChainPostrouting, "kubernetes postrouting rules", nil}, -} - -var iptablesCleanupOnlyChains = []iptablesJumpChain{ - // Present in kube 1.13 - 1.19. Removed by #95252 in favor of adding reject rules for incoming/forwarding packets to kubeExternalServicesChain - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, -} - -// CleanupLeftovers removes all iptables rules and chains created by the Proxier -// It returns true if an error was encountered. Errors are logged. -func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { - // Unlink our chains - for _, jump := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) { - args := append(jump.extraArgs, - "-m", "comment", "--comment", jump.comment, - "-j", string(jump.dstChain), - ) - if err := ipt.DeleteRule(jump.table, jump.srcChain, args...); err != nil { - if !utiliptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error removing pure-iptables proxy rule") - encounteredError = true - } - } - } - - // Flush and remove all of our "-t nat" chains. - iptablesData := bytes.NewBuffer(nil) - if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { - klog.ErrorS(err, "Failed to execute iptables-save", "table", utiliptables.TableNAT) - encounteredError = true - } else { - existingNATChains := utiliptables.GetChainsFromTable(iptablesData.Bytes()) - natChains := &utilproxy.LineBuffer{} - natRules := &utilproxy.LineBuffer{} - natChains.Write("*nat") - // Start with chains we know we need to remove. - for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain} { - if _, found := existingNATChains[chain]; found { - chainString := string(chain) - natChains.Write(utiliptables.MakeChainLine(chain)) // flush - natRules.Write("-X", chainString) // delete - } - } - // Hunt for service and endpoint chains. - for chain := range existingNATChains { - chainString := string(chain) - if isServiceChainName(chainString) { - natChains.Write(utiliptables.MakeChainLine(chain)) // flush - natRules.Write("-X", chainString) // delete - } - } - natRules.Write("COMMIT") - natLines := append(natChains.Bytes(), natRules.Bytes()...) - // Write it. - err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) - if err != nil { - klog.ErrorS(err, "Failed to execute iptables-restore", "table", utiliptables.TableNAT) - metrics.IptablesRestoreFailuresTotal.Inc() - encounteredError = true - } - } - - // Flush and remove all of our "-t filter" chains. 
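CleanupLeftovers never deletes chains one call at a time; it replays an iptables-restore payload with NoFlushTables so untouched chains keep their state. The nat-table input assembled above (the filter-table pass that follows is analogous) comes out roughly like this, with MakeChainLine producing the ":CHAIN - [0:0]" flush lines; the chain selection here is abbreviated:

```go
package main

import "fmt"

func main() {
	// Sketch of the restore payload: re-declaring a chain flushes it,
	// "-X" then deletes it, and COMMIT applies the table atomically.
	payload := `*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
-X KUBE-SERVICES
-X KUBE-NODEPORTS
-X KUBE-POSTROUTING
COMMIT
`
	fmt.Print(payload)
}
```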
- iptablesData.Reset() - if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { - klog.ErrorS(err, "Failed to execute iptables-save", "table", utiliptables.TableFilter) - encounteredError = true - } else { - existingFilterChains := utiliptables.GetChainsFromTable(iptablesData.Bytes()) - filterChains := &utilproxy.LineBuffer{} - filterRules := &utilproxy.LineBuffer{} - filterChains.Write("*filter") - for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} { - if _, found := existingFilterChains[chain]; found { - chainString := string(chain) - filterChains.Write(utiliptables.MakeChainLine(chain)) - filterRules.Write("-X", chainString) - } - } - filterRules.Write("COMMIT") - filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) - // Write it. - if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { - klog.ErrorS(err, "Failed to execute iptables-restore", "table", utiliptables.TableFilter) - metrics.IptablesRestoreFailuresTotal.Inc() - encounteredError = true - } - } - return encounteredError -} - -func computeProbability(n int) string { - return fmt.Sprintf("%0.10f", 1.0/float64(n)) -} - -// This assumes proxier.mu is held -func (proxier *Proxier) precomputeProbabilities(numberOfPrecomputed int) { - if len(proxier.precomputedProbabilities) == 0 { - proxier.precomputedProbabilities = append(proxier.precomputedProbabilities, "") - } - for i := len(proxier.precomputedProbabilities); i <= numberOfPrecomputed; i++ { - proxier.precomputedProbabilities = append(proxier.precomputedProbabilities, computeProbability(i)) - } -} - -// This assumes proxier.mu is held -func (proxier *Proxier) probability(n int) string { - if n >= len(proxier.precomputedProbabilities) { - proxier.precomputeProbabilities(n) - } - return proxier.precomputedProbabilities[n] -} - -// Sync is called to synchronize the proxier state to iptables as soon as possible. -func (proxier *Proxier) Sync() { - if proxier.healthzServer != nil { - proxier.healthzServer.QueuedUpdate() - } - metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() - proxier.syncRunner.Run() -} - -// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return. -func (proxier *Proxier) SyncLoop() { - // Update healthz timestamp at beginning in case Sync() never succeeds. - if proxier.healthzServer != nil { - proxier.healthzServer.Updated() - } - - // synthesize "last change queued" time as the informers are syncing. - metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() - proxier.syncRunner.Loop(wait.NeverStop) -} - -func (proxier *Proxier) setInitialized(value bool) { - var initialized int32 - if value { - initialized = 1 - } - atomic.StoreInt32(&proxier.initialized, initialized) -} - -func (proxier *Proxier) isInitialized() bool { - return atomic.LoadInt32(&proxier.initialized) > 0 -} - -// OnServiceAdd is called whenever creation of new service object -// is observed. -func (proxier *Proxier) OnServiceAdd(service *v1.Service) { - proxier.OnServiceUpdate(nil, service) -} - -// OnServiceUpdate is called whenever modification of an existing -// service object is observed. 
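The probability helpers above feed iptables' statistic match, which is how the deleted proxier spread traffic across endpoints: with n endpoints it emits n-1 probabilistic jump rules followed by one unconditional jump, giving every endpoint a 1/n share of new connections. The numbers for three endpoints:

```go
package main

import "fmt"

// computeProbability is the same helper as above: the match probability
// for a rule that still has n candidate endpoints behind it.
func computeProbability(n int) string {
	return fmt.Sprintf("%0.10f", 1.0/float64(n))
}

func main() {
	// Endpoint 1 is tried with probability 1/3; if not chosen, endpoint 2
	// with probability 1/2; endpoint 3 takes whatever is left. Each share
	// works out to 1/3 overall.
	fmt.Println(computeProbability(3)) // 0.3333333333
	fmt.Println(computeProbability(2)) // 0.5000000000
}
```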
-func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { - if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnServiceDelete is called whenever deletion of an existing service -// object is observed. -func (proxier *Proxier) OnServiceDelete(service *v1.Service) { - proxier.OnServiceUpdate(service, nil) - -} - -// OnServiceSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnServiceSynced() { - proxier.mu.Lock() - proxier.servicesSynced = true - proxier.setInitialized(proxier.endpointSlicesSynced) - proxier.mu.Unlock() - - // Sync unconditionally - this is called once per lifetime. - proxier.syncProxyRules() -} - -// OnEndpointSliceAdd is called whenever creation of a new endpoint slice object -// is observed. -func (proxier *Proxier) OnEndpointSliceAdd(endpointSlice *discovery.EndpointSlice) { - if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnEndpointSliceUpdate is called whenever modification of an existing endpoint -// slice object is observed. -func (proxier *Proxier) OnEndpointSliceUpdate(_, endpointSlice *discovery.EndpointSlice) { - if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnEndpointSliceDelete is called whenever deletion of an existing endpoint slice -// object is observed. -func (proxier *Proxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointSlice) { - if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, true) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnEndpointSlicesSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnEndpointSlicesSynced() { - proxier.mu.Lock() - proxier.endpointSlicesSynced = true - proxier.setInitialized(proxier.servicesSynced) - proxier.mu.Unlock() - - // Sync unconditionally - this is called once per lifetime. - proxier.syncProxyRules() -} - -// OnNodeAdd is called whenever creation of new node object -// is observed. -func (proxier *Proxier) OnNodeAdd(node *v1.Node) { - if node.Name != proxier.hostname { - klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", - "eventNode", node.Name, "currentNode", proxier.hostname) - return - } - - if reflect.DeepEqual(proxier.nodeLabels, node.Labels) { - return - } - - proxier.mu.Lock() - proxier.nodeLabels = map[string]string{} - for k, v := range node.Labels { - proxier.nodeLabels[k] = v - } - proxier.needFullSync = true - proxier.mu.Unlock() - klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) - - proxier.Sync() -} - -// OnNodeUpdate is called whenever modification of an existing -// node object is observed. 
-func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) { - if node.Name != proxier.hostname { - klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", - "eventNode", node.Name, "currentNode", proxier.hostname) - return - } - - if reflect.DeepEqual(proxier.nodeLabels, node.Labels) { - return - } - - proxier.mu.Lock() - proxier.nodeLabels = map[string]string{} - for k, v := range node.Labels { - proxier.nodeLabels[k] = v - } - proxier.needFullSync = true - proxier.mu.Unlock() - klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels) - - proxier.Sync() -} - -// OnNodeDelete is called whenever deletion of an existing node -// object is observed. -func (proxier *Proxier) OnNodeDelete(node *v1.Node) { - if node.Name != proxier.hostname { - klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node", - "eventNode", node.Name, "currentNode", proxier.hostname) - return - } - proxier.mu.Lock() - proxier.nodeLabels = nil - proxier.needFullSync = true - proxier.mu.Unlock() - - proxier.Sync() -} - -// OnNodeSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnNodeSynced() { -} - -// portProtoHash takes the ServicePortName and protocol for a service -// returns the associated 16 character hash. This is computed by hashing (sha256) -// then encoding to base32 and truncating to 16 chars. We do this because IPTables -// Chain Names must be <= 28 chars long, and the longer they are the harder they are to read. -func portProtoHash(servicePortName string, protocol string) string { - hash := sha256.Sum256([]byte(servicePortName + protocol)) - encoded := base32.StdEncoding.EncodeToString(hash[:]) - return encoded[:16] -} - -const ( - servicePortPolicyClusterChainNamePrefix = "KUBE-SVC-" - servicePortPolicyLocalChainNamePrefix = "KUBE-SVL-" - serviceFirewallChainNamePrefix = "KUBE-FW-" - serviceExternalChainNamePrefix = "KUBE-EXT-" - servicePortEndpointChainNamePrefix = "KUBE-SEP-" - - // For cleanup. This can be removed after 1.26 is released. - deprecatedServiceLBChainNamePrefix = "KUBE-XLB-" -) - -// servicePortPolicyClusterChain returns the name of the KUBE-SVC-XXXX chain for a service, which is the -// main iptables chain for that service, used for dispatching to endpoints when using `Cluster` -// traffic policy. -func servicePortPolicyClusterChain(servicePortName string, protocol string) utiliptables.Chain { - return utiliptables.Chain(servicePortPolicyClusterChainNamePrefix + portProtoHash(servicePortName, protocol)) -} - -// servicePortPolicyLocalChainName returns the name of the KUBE-SVL-XXXX chain for a service, which -// handles dispatching to local endpoints when using `Local` traffic policy. This chain only -// exists if the service has `Local` internal or external traffic policy. -func servicePortPolicyLocalChainName(servicePortName string, protocol string) utiliptables.Chain { - return utiliptables.Chain(servicePortPolicyLocalChainNamePrefix + portProtoHash(servicePortName, protocol)) -} - -// serviceFirewallChainName returns the name of the KUBE-FW-XXXX chain for a service, which -// is used to implement the filtering for the LoadBalancerSourceRanges feature. 
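// Illustrative sketch (standalone; the service name is hypothetical):
// reproducing the naming scheme described above, so that "KUBE-SVC-" plus a
// 16-character digest stays within iptables' 28-character chain-name limit:
package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
)

func main() {
	servicePortName := "ns1/svc1:p80" // hypothetical ServicePortName string
	protocol := "tcp"
	hash := sha256.Sum256([]byte(servicePortName + protocol))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	fmt.Println("KUBE-SVC-" + encoded[:16]) // deterministic for a given service/protocol
}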
-func serviceFirewallChainName(servicePortName string, protocol string) utiliptables.Chain { - return utiliptables.Chain(serviceFirewallChainNamePrefix + portProtoHash(servicePortName, protocol)) -} - -// serviceExternalChainName returns the name of the KUBE-EXT-XXXX chain for a service, which -// implements "short-circuiting" for internally-originated external-destination traffic when using -// `Local` external traffic policy. It forwards traffic from local sources to the KUBE-SVC-XXXX -// chain and traffic from external sources to the KUBE-SVL-XXXX chain. -func serviceExternalChainName(servicePortName string, protocol string) utiliptables.Chain { - return utiliptables.Chain(serviceExternalChainNamePrefix + portProtoHash(servicePortName, protocol)) -} - -// servicePortEndpointChainName returns the name of the KUBE-SEP-XXXX chain for a particular -// service endpoint. -func servicePortEndpointChainName(servicePortName string, protocol string, endpoint string) utiliptables.Chain { - hash := sha256.Sum256([]byte(servicePortName + protocol + endpoint)) - encoded := base32.StdEncoding.EncodeToString(hash[:]) - return utiliptables.Chain(servicePortEndpointChainNamePrefix + encoded[:16]) -} - -func isServiceChainName(chainString string) bool { - prefixes := []string{ - servicePortPolicyClusterChainNamePrefix, - servicePortPolicyLocalChainNamePrefix, - servicePortEndpointChainNamePrefix, - serviceFirewallChainNamePrefix, - serviceExternalChainNamePrefix, - deprecatedServiceLBChainNamePrefix, - } - - for _, p := range prefixes { - if strings.HasPrefix(chainString, p) { - return true - } - } - return false -} - -// After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we -// risk sending more traffic to it, all of which will be lost (because UDP). -// This assumes the proxier mutex is held -// TODO: move it to util -func (proxier *Proxier) deleteUDPEndpointConnections(deletedUDPEndpoints []proxy.ServiceEndpoint) { - for _, epSvcPair := range deletedUDPEndpoints { - if svcInfo, ok := proxier.svcPortMap[epSvcPair.ServicePortName]; ok { - endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) - nodePort := svcInfo.NodePort() - var err error - if nodePort != 0 { - err = conntrack.ClearEntriesForPortNAT(proxier.exec, endpointIP, nodePort, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete nodeport-related endpoint connections", "servicePortName", epSvcPair.ServicePortName) - } - } - err = conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIP().String(), endpointIP, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete endpoint connections", "servicePortName", epSvcPair.ServicePortName) - } - for _, extIP := range svcInfo.ExternalIPStrings() { - err := conntrack.ClearEntriesForNAT(proxier.exec, extIP, endpointIP, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete endpoint connections for externalIP", "servicePortName", epSvcPair.ServicePortName, "externalIP", extIP) - } - } - for _, lbIP := range svcInfo.LoadBalancerIPStrings() { - err := conntrack.ClearEntriesForNAT(proxier.exec, lbIP, endpointIP, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete endpoint connections for LoadBalancerIP", "servicePortName", epSvcPair.ServicePortName, "loadBalancerIP", lbIP) - } - } - } - } -} - -// Assumes proxier.mu is held. 
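// Illustrative sketch (assumption: the conntrack helpers shell out to the
// conntrack(8) CLI, as pkg/util/conntrack does; the addresses below are
// hypothetical). Clearing NAT entries for a removed UDP endpoint is roughly
// equivalent to running:
//
//   conntrack -D --orig-dst <serviceIP> --dst-nat <endpointIP> -p udp
//
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	serviceIP, endpointIP := "10.96.5.10", "10.244.1.5"
	cmd := exec.Command("conntrack", "-D",
		"--orig-dst", serviceIP, "--dst-nat", endpointIP, "-p", "udp")
	if out, err := cmd.CombinedOutput(); err != nil {
		// conntrack typically exits non-zero when no entries matched,
		// which is benign for this use.
		fmt.Printf("conntrack: %v: %s\n", err, out)
	}
}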
-func (proxier *Proxier) appendServiceCommentLocked(args []string, svcName string) []string { - // Not printing these comments, can reduce size of iptables (in case of large - // number of endpoints) even by 40%+. So if total number of endpoint chains - // is large enough, we simply drop those comments. - if proxier.largeClusterMode { - return args - } - return append(args, "-m", "comment", "--comment", svcName) -} - -// Called by the iptables.Monitor, and in response to topology changes; this calls -// syncProxyRules() and tells it to resync all services, regardless of whether the -// Service or Endpoints/EndpointSlice objects themselves have changed -func (proxier *Proxier) forceSyncProxyRules() { - proxier.mu.Lock() - proxier.needFullSync = true - proxier.mu.Unlock() - - proxier.syncProxyRules() -} - -// This is where all of the iptables-save/restore calls happen. -// The only other iptables rules are those that are setup in iptablesInit() -// This assumes proxier.mu is NOT held -func (proxier *Proxier) syncProxyRules() { - proxier.mu.Lock() - defer proxier.mu.Unlock() - - // don't sync rules till we've received services and endpoints - if !proxier.isInitialized() { - klog.V(2).InfoS("Not syncing iptables until Services and Endpoints have been received from master") - return - } - - // Keep track of how long syncs take. - start := time.Now() - defer func() { - metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start)) - klog.V(2).InfoS("SyncProxyRules complete", "elapsed", time.Since(start)) - }() - - tryPartialSync := !proxier.needFullSync && utilfeature.DefaultFeatureGate.Enabled(features.MinimizeIPTablesRestore) - var serviceChanged, endpointsChanged sets.String - if tryPartialSync { - serviceChanged = proxier.serviceChanges.PendingChanges() - endpointsChanged = proxier.endpointsChanges.PendingChanges() - } - serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges) - endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) - - // We need to detect stale connections to UDP Services so we - // can clean dangling conntrack entries that can blackhole traffic. - conntrackCleanupServiceIPs := serviceUpdateResult.DeletedUDPClusterIPs - conntrackCleanupServiceNodePorts := sets.NewInt() - // merge stale services gathered from updateEndpointsMap - // an UDP service that changes from 0 to non-0 endpoints is considered stale. - for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices { - if svcInfo, ok := proxier.svcPortMap[svcPortName]; ok { - klog.V(4).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName) - conntrackCleanupServiceIPs.Insert(svcInfo.ClusterIP().String()) - for _, extIP := range svcInfo.ExternalIPStrings() { - conntrackCleanupServiceIPs.Insert(extIP) - } - for _, lbIP := range svcInfo.LoadBalancerIPStrings() { - conntrackCleanupServiceIPs.Insert(lbIP) - } - nodePort := svcInfo.NodePort() - if svcInfo.Protocol() == v1.ProtocolUDP && nodePort != 0 { - conntrackCleanupServiceNodePorts.Insert(nodePort) - } - } - } - - klog.V(2).InfoS("Syncing iptables rules") - - success := false - defer func() { - if !success { - klog.InfoS("Sync failed", "retryingTime", proxier.syncPeriod) - proxier.syncRunner.RetryAfter(proxier.syncPeriod) - if tryPartialSync { - metrics.IptablesPartialRestoreFailuresTotal.Inc() - } - // proxier.serviceChanges and proxier.endpointChanges have already - // been flushed, so we've lost the state needed to be able to do - // a partial sync. 
- proxier.needFullSync = true - } - }() - - if !tryPartialSync { - // Ensure that our jump rules (eg from PREROUTING to KUBE-SERVICES) exist. - // We can't do this as part of the iptables-restore because we don't want - // to specify/replace *all* of the rules in PREROUTING, etc. - // - // We need to create these rules when kube-proxy first starts, and we need - // to recreate them if the utiliptables Monitor detects that iptables has - // been flushed. In both of those cases, the code will force a full sync. - // In all other cases, it ought to be safe to assume that the rules - // already exist, so we'll skip this step when doing a partial sync, to - // save us from having to invoke /sbin/iptables 20 times on each sync - // (which will be very slow on hosts with lots of iptables rules). - for _, jump := range append(iptablesJumpChains, iptablesKubeletJumpChains...) { - if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { - klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain) - return - } - args := jump.extraArgs - if jump.comment != "" { - args = append(args, "-m", "comment", "--comment", jump.comment) - } - args = append(args, "-j", string(jump.dstChain)) - if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil { - klog.ErrorS(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain) - return - } - } - } - - // - // Below this point we will not return until we try to write the iptables rules. - // - - // Reset all buffers used later. - // This is to avoid memory reallocations and thus improve performance. - proxier.filterChains.Reset() - proxier.filterRules.Reset() - proxier.natChains.Reset() - proxier.natRules.Reset() - - // Write chain lines for all the "top-level" chains we'll be filling in - for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain, kubeProxyFirewallChain} { - proxier.filterChains.Write(utiliptables.MakeChainLine(chainName)) - } - for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, kubeMarkMasqChain} { - proxier.natChains.Write(utiliptables.MakeChainLine(chainName)) - } - - // Install the kubernetes-specific postrouting rules. We use a whole chain for - // this so that it is easier to flush and change, for example if the mark - // value should ever change. - // NB: THIS MUST MATCH the corresponding code in the kubelet - proxier.natRules.Write( - "-A", string(kubePostroutingChain), - "-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark), - "-j", "RETURN", - ) - // Clear the mark to avoid re-masquerading if the packet re-traverses the network stack. - proxier.natRules.Write( - "-A", string(kubePostroutingChain), - "-j", "MARK", "--xor-mark", proxier.masqueradeMark, - ) - masqRule := []string{ - "-A", string(kubePostroutingChain), - "-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`, - "-j", "MASQUERADE", - } - if proxier.iptables.HasRandomFully() { - masqRule = append(masqRule, "--random-fully") - } - proxier.natRules.Write(masqRule) - - // Install the kubernetes-specific masquerade mark rule. We use a whole chain for - // this so that it is easier to flush and change, for example if the mark - // value should ever change. 
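// Illustrative sketch (standalone, not part of the patch): with kube-proxy's
// default masquerade bit of 14, proxier.masqueradeMark renders as "0x004000",
// so the KUBE-POSTROUTING rules written above test, clear, and act on that mark:
package main

import "fmt"

func main() {
	masqueradeBit := 14 // default for --iptables-masquerade-bit
	masqueradeMark := fmt.Sprintf("%#08x", 1<<uint(masqueradeBit))
	fmt.Printf("-A KUBE-POSTROUTING -m mark ! --mark %s/%s -j RETURN\n", masqueradeMark, masqueradeMark)
	fmt.Printf("-A KUBE-POSTROUTING -j MARK --xor-mark %s\n", masqueradeMark)
	fmt.Println(`-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE`)
}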
- proxier.natRules.Write( - "-A", string(kubeMarkMasqChain), - "-j", "MARK", "--or-mark", proxier.masqueradeMark, - ) - - isIPv6 := proxier.iptables.IsIPv6() - if !isIPv6 && proxier.localhostNodePorts { - // Kube-proxy's use of `route_localnet` to enable NodePorts on localhost - // creates a security hole (https://issue.k8s.io/90259) which this - // iptables rule mitigates. - // NB: THIS MUST MATCH the corresponding code in the kubelet. (Actually, - // kubelet uses "--dst"/"--src" rather than "-d"/"-s" but that's just a - // command-line thing and results in the same rule being created.) - proxier.filterChains.Write(utiliptables.MakeChainLine(kubeletFirewallChain)) - proxier.filterRules.Write( - "-A", string(kubeletFirewallChain), - "-m", "comment", "--comment", `"block incoming localnet connections"`, - "-d", "127.0.0.0/8", - "!", "-s", "127.0.0.0/8", - "-m", "conntrack", - "!", "--ctstate", "RELATED,ESTABLISHED,DNAT", - "-j", "DROP", - ) - } - - // Accumulate NAT chains to keep. - activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set - - // To avoid growing this slice, we arbitrarily set its size to 64, - // there is never more than that many arguments for a single line. - // Note that even if we go over 64, it will still be correct - it - // is just for efficiency, not correctness. - args := make([]string, 64) - - // Compute total number of endpoint chains across all services - // to get a sense of how big the cluster is. - totalEndpoints := 0 - for svcName := range proxier.svcPortMap { - totalEndpoints += len(proxier.endpointsMap[svcName]) - } - proxier.largeClusterMode = (totalEndpoints > largeClusterEndpointsThreshold) - - // These two variables are used to publish the sync_proxy_rules_no_endpoints_total - // metric. - serviceNoLocalEndpointsTotalInternal := 0 - serviceNoLocalEndpointsTotalExternal := 0 - - // Build rules for each service-port. - for svcName, svc := range proxier.svcPortMap { - svcInfo, ok := svc.(*servicePortInfo) - if !ok { - klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName) - continue - } - protocol := strings.ToLower(string(svcInfo.Protocol())) - svcPortNameString := svcInfo.nameString - - // Figure out the endpoints for Cluster and Local traffic policy. - // allLocallyReachableEndpoints is the set of all endpoints that can be routed to - // from this node, given the service's traffic policies. hasEndpoints is true - // if the service has any usable endpoints on any node, not just this one. 
- allEndpoints := proxier.endpointsMap[svcName] - clusterEndpoints, localEndpoints, allLocallyReachableEndpoints, hasEndpoints := proxy.CategorizeEndpoints(allEndpoints, svcInfo, proxier.nodeLabels) - - // Note the endpoint chains that will be used - for _, ep := range allLocallyReachableEndpoints { - if epInfo, ok := ep.(*endpointsInfo); ok { - activeNATChains[epInfo.ChainName] = true - } - } - - // clusterPolicyChain contains the endpoints used with "Cluster" traffic policy - clusterPolicyChain := svcInfo.clusterPolicyChainName - usesClusterPolicyChain := len(clusterEndpoints) > 0 && svcInfo.UsesClusterEndpoints() - if usesClusterPolicyChain { - activeNATChains[clusterPolicyChain] = true - } - - // localPolicyChain contains the endpoints used with "Local" traffic policy - localPolicyChain := svcInfo.localPolicyChainName - usesLocalPolicyChain := len(localEndpoints) > 0 && svcInfo.UsesLocalEndpoints() - if usesLocalPolicyChain { - activeNATChains[localPolicyChain] = true - } - - // internalPolicyChain is the chain containing the endpoints for - // "internal" (ClusterIP) traffic. internalTrafficChain is the chain that - // internal traffic is routed to (which is always the same as - // internalPolicyChain). hasInternalEndpoints is true if we should - // generate rules pointing to internalTrafficChain, or false if there are - // no available internal endpoints. - internalPolicyChain := clusterPolicyChain - hasInternalEndpoints := hasEndpoints - if svcInfo.InternalPolicyLocal() { - internalPolicyChain = localPolicyChain - if len(localEndpoints) == 0 { - hasInternalEndpoints = false - } - } - internalTrafficChain := internalPolicyChain - - // Similarly, externalPolicyChain is the chain containing the endpoints - // for "external" (NodePort, LoadBalancer, and ExternalIP) traffic. - // externalTrafficChain is the chain that external traffic is routed to - // (which is always the service's "EXT" chain). hasExternalEndpoints is - // true if there are endpoints that will be reached by external traffic. - // (But we may still have to generate externalTrafficChain even if there - // are no external endpoints, to ensure that the short-circuit rules for - // local traffic are set up.) - externalPolicyChain := clusterPolicyChain - hasExternalEndpoints := hasEndpoints - if svcInfo.ExternalPolicyLocal() { - externalPolicyChain = localPolicyChain - if len(localEndpoints) == 0 { - hasExternalEndpoints = false - } - } - externalTrafficChain := svcInfo.externalChainName // eventually jumps to externalPolicyChain - - // usesExternalTrafficChain is based on hasEndpoints, not hasExternalEndpoints, - // because we need the local-traffic-short-circuiting rules even when there - // are no externally-usable endpoints. - usesExternalTrafficChain := hasEndpoints && svcInfo.ExternallyAccessible() - if usesExternalTrafficChain { - activeNATChains[externalTrafficChain] = true - } - - // Traffic to LoadBalancer IPs can go directly to externalTrafficChain - // unless LoadBalancerSourceRanges is in use in which case we will - // create a firewall chain. 
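// Illustrative summary (chain names hypothetical) of the per-service chain
// topology selected above:
//
//   ClusterIP traffic                 -> internalTrafficChain
//                                        (KUBE-SVC-XXXX for Cluster policy,
//                                         KUBE-SVL-XXXX for Local policy)
//   NodePort / LoadBalancer / extIP   -> [KUBE-FW-XXXX ->] KUBE-EXT-XXXX
//                                        -> KUBE-SVC-XXXX or KUBE-SVL-XXXX
//   each locally reachable endpoint   -> KUBE-SEP-XXXX (hairpin masq + DNAT)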
- loadBalancerTrafficChain := externalTrafficChain - fwChain := svcInfo.firewallChainName - usesFWChain := hasEndpoints && len(svcInfo.LoadBalancerIPStrings()) > 0 && len(svcInfo.LoadBalancerSourceRanges()) > 0 - if usesFWChain { - activeNATChains[fwChain] = true - loadBalancerTrafficChain = fwChain - } - - var internalTrafficFilterTarget, internalTrafficFilterComment string - var externalTrafficFilterTarget, externalTrafficFilterComment string - if !hasEndpoints { - // The service has no endpoints at all; hasInternalEndpoints and - // hasExternalEndpoints will also be false, and we will not - // generate any chains in the "nat" table for the service; only - // rules in the "filter" table rejecting incoming packets for - // the service's IPs. - internalTrafficFilterTarget = "REJECT" - internalTrafficFilterComment = fmt.Sprintf(`"%s has no endpoints"`, svcPortNameString) - externalTrafficFilterTarget = "REJECT" - externalTrafficFilterComment = internalTrafficFilterComment - } else { - if !hasInternalEndpoints { - // The internalTrafficPolicy is "Local" but there are no local - // endpoints. Traffic to the clusterIP will be dropped, but - // external traffic may still be accepted. - internalTrafficFilterTarget = "DROP" - internalTrafficFilterComment = fmt.Sprintf(`"%s has no local endpoints"`, svcPortNameString) - serviceNoLocalEndpointsTotalInternal++ - } - if !hasExternalEndpoints { - // The externalTrafficPolicy is "Local" but there are no - // local endpoints. Traffic to "external" IPs from outside - // the cluster will be dropped, but traffic from inside - // the cluster may still be accepted. - externalTrafficFilterTarget = "DROP" - externalTrafficFilterComment = fmt.Sprintf(`"%s has no local endpoints"`, svcPortNameString) - serviceNoLocalEndpointsTotalExternal++ - } - } - - // Capture the clusterIP. - if hasInternalEndpoints { - proxier.natRules.Write( - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcPortNameString), - "-m", protocol, "-p", protocol, - "-d", svcInfo.ClusterIP().String(), - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", string(internalTrafficChain)) - } else { - // No endpoints. - proxier.filterRules.Write( - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", internalTrafficFilterComment, - "-m", protocol, "-p", protocol, - "-d", svcInfo.ClusterIP().String(), - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", internalTrafficFilterTarget, - ) - } - - // Capture externalIPs. - for _, externalIP := range svcInfo.ExternalIPStrings() { - if hasEndpoints { - // Send traffic bound for external IPs to the "external - // destinations" chain. - proxier.natRules.Write( - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcPortNameString), - "-m", protocol, "-p", protocol, - "-d", externalIP, - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", string(externalTrafficChain)) - } - if !hasExternalEndpoints { - // Either no endpoints at all (REJECT) or no endpoints for - // external traffic (DROP anything that didn't get - // short-circuited by the EXT chain.) - proxier.filterRules.Write( - "-A", string(kubeExternalServicesChain), - "-m", "comment", "--comment", externalTrafficFilterComment, - "-m", protocol, "-p", protocol, - "-d", externalIP, - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", externalTrafficFilterTarget, - ) - } - } - - // Capture load-balancer ingress. 
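// Illustrative rendering (hypothetical service "ns1/svc1:p80" on
// 10.96.5.10:80/TCP): a service with no endpoints at all gets only a filter
// rule along the lines of
//
//   -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 10.96.5.10 --dport 80 -j REJECT
//
// while a service whose externalTrafficPolicy is Local but which has no local
// endpoints gets DROP rules in KUBE-EXTERNAL-SERVICES instead.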
- for _, lbip := range svcInfo.LoadBalancerIPStrings() { - if hasEndpoints { - proxier.natRules.Write( - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcPortNameString), - "-m", protocol, "-p", protocol, - "-d", lbip, - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", string(loadBalancerTrafficChain)) - - } - if usesFWChain { - proxier.filterRules.Write( - "-A", string(kubeProxyFirewallChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s traffic not accepted by %s"`, svcPortNameString, svcInfo.firewallChainName), - "-m", protocol, "-p", protocol, - "-d", lbip, - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", "DROP") - } - } - if !hasExternalEndpoints { - // Either no endpoints at all (REJECT) or no endpoints for - // external traffic (DROP anything that didn't get short-circuited - // by the EXT chain.) - for _, lbip := range svcInfo.LoadBalancerIPStrings() { - proxier.filterRules.Write( - "-A", string(kubeExternalServicesChain), - "-m", "comment", "--comment", externalTrafficFilterComment, - "-m", protocol, "-p", protocol, - "-d", lbip, - "--dport", strconv.Itoa(svcInfo.Port()), - "-j", externalTrafficFilterTarget, - ) - } - } - - // Capture nodeports. - if svcInfo.NodePort() != 0 { - if hasEndpoints { - // Jump to the external destination chain. For better or for - // worse, nodeports are not subect to loadBalancerSourceRanges, - // and we can't change that. - proxier.natRules.Write( - "-A", string(kubeNodePortsChain), - "-m", "comment", "--comment", svcPortNameString, - "-m", protocol, "-p", protocol, - "--dport", strconv.Itoa(svcInfo.NodePort()), - "-j", string(externalTrafficChain)) - } - if !hasExternalEndpoints { - // Either no endpoints at all (REJECT) or no endpoints for - // external traffic (DROP anything that didn't get - // short-circuited by the EXT chain.) - proxier.filterRules.Write( - "-A", string(kubeExternalServicesChain), - "-m", "comment", "--comment", externalTrafficFilterComment, - "-m", "addrtype", "--dst-type", "LOCAL", - "-m", protocol, "-p", protocol, - "--dport", strconv.Itoa(svcInfo.NodePort()), - "-j", externalTrafficFilterTarget, - ) - } - } - - // Capture healthCheckNodePorts. - if svcInfo.HealthCheckNodePort() != 0 { - // no matter if node has local endpoints, healthCheckNodePorts - // need to add a rule to accept the incoming connection - proxier.filterRules.Write( - "-A", string(kubeNodePortsChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s health check node port"`, svcPortNameString), - "-m", "tcp", "-p", "tcp", - "--dport", strconv.Itoa(svcInfo.HealthCheckNodePort()), - "-j", "ACCEPT", - ) - } - - // If the SVC/SVL/EXT/FW/SEP chains have not changed since the last sync - // then we can omit them from the restore input. (We have already marked - // them in activeNATChains, so they won't get deleted.) - if tryPartialSync && !serviceChanged.Has(svcName.NamespacedName.String()) && !endpointsChanged.Has(svcName.NamespacedName.String()) { - continue - } - - // Set up internal traffic handling. - if hasInternalEndpoints { - args = append(args[:0], - "-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcPortNameString), - "-m", protocol, "-p", protocol, - "-d", svcInfo.ClusterIP().String(), - "--dport", strconv.Itoa(svcInfo.Port()), - ) - if proxier.masqueradeAll { - proxier.natRules.Write( - "-A", string(internalTrafficChain), - args, - "-j", string(kubeMarkMasqChain)) - } else if proxier.localDetector.IsImplemented() { - // This masquerades off-cluster traffic to a service VIP. 
The - // idea is that you can establish a static route for your - // Service range, routing to any node, and that node will - // bridge into the Service for you. Since that might bounce - // off-node, we masquerade here. - proxier.natRules.Write( - "-A", string(internalTrafficChain), - args, - proxier.localDetector.IfNotLocal(), - "-j", string(kubeMarkMasqChain)) - } - } - - // Set up external traffic handling (if any "external" destinations are - // enabled). All captured traffic for all external destinations should - // jump to externalTrafficChain, which will handle some special cases and - // then jump to externalPolicyChain. - if usesExternalTrafficChain { - proxier.natChains.Write(utiliptables.MakeChainLine(externalTrafficChain)) - - if !svcInfo.ExternalPolicyLocal() { - // If we are using non-local endpoints we need to masquerade, - // in case we cross nodes. - proxier.natRules.Write( - "-A", string(externalTrafficChain), - "-m", "comment", "--comment", fmt.Sprintf(`"masquerade traffic for %s external destinations"`, svcPortNameString), - "-j", string(kubeMarkMasqChain)) - } else { - // If we are only using same-node endpoints, we can retain the - // source IP in most cases. - - if proxier.localDetector.IsImplemented() { - // Treat all locally-originated pod -> external destination - // traffic as a special-case. It is subject to neither - // form of traffic policy, which simulates going up-and-out - // to an external load-balancer and coming back in. - proxier.natRules.Write( - "-A", string(externalTrafficChain), - "-m", "comment", "--comment", fmt.Sprintf(`"pod traffic for %s external destinations"`, svcPortNameString), - proxier.localDetector.IfLocal(), - "-j", string(clusterPolicyChain)) - } - - // Locally originated traffic (not a pod, but the host node) - // still needs masquerade because the LBIP itself is a local - // address, so that will be the chosen source IP. - proxier.natRules.Write( - "-A", string(externalTrafficChain), - "-m", "comment", "--comment", fmt.Sprintf(`"masquerade LOCAL traffic for %s external destinations"`, svcPortNameString), - "-m", "addrtype", "--src-type", "LOCAL", - "-j", string(kubeMarkMasqChain)) - - // Redirect all src-type=LOCAL -> external destination to the - // policy=cluster chain. This allows traffic originating - // from the host to be redirected to the service correctly. - proxier.natRules.Write( - "-A", string(externalTrafficChain), - "-m", "comment", "--comment", fmt.Sprintf(`"route LOCAL traffic for %s external destinations"`, svcPortNameString), - "-m", "addrtype", "--src-type", "LOCAL", - "-j", string(clusterPolicyChain)) - } - - // Anything else falls thru to the appropriate policy chain. - if hasExternalEndpoints { - proxier.natRules.Write( - "-A", string(externalTrafficChain), - "-j", string(externalPolicyChain)) - } - } - - // Set up firewall chain, if needed - if usesFWChain { - proxier.natChains.Write(utiliptables.MakeChainLine(fwChain)) - - // The service firewall rules are created based on the - // loadBalancerSourceRanges field. This only works for VIP-like - // loadbalancers that preserve source IPs. For loadbalancers which - // direct traffic to service NodePort, the firewall rules will not - // apply. 
- args = append(args[:0], - "-A", string(fwChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcPortNameString), - ) - - // firewall filter based on each source range - allowFromNode := false - for _, src := range svcInfo.LoadBalancerSourceRanges() { - proxier.natRules.Write(args, "-s", src, "-j", string(externalTrafficChain)) - _, cidr, err := netutils.ParseCIDRSloppy(src) - if err != nil { - klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr) - } else if cidr.Contains(proxier.nodeIP) { - allowFromNode = true - } - } - // For VIP-like LBs, the VIP is often added as a local - // address (via an IP route rule). In that case, a request - // from a node to the VIP will not hit the loadbalancer but - // will loop back with the source IP set to the VIP. We - // need the following rules to allow requests from this node. - if allowFromNode { - for _, lbip := range svcInfo.LoadBalancerIPStrings() { - proxier.natRules.Write( - args, - "-s", lbip, - "-j", string(externalTrafficChain)) - } - } - // If the packet was able to reach the end of firewall chain, - // then it did not get DNATed, so it will match the - // corresponding KUBE-PROXY-FIREWALL rule. - proxier.natRules.Write( - "-A", string(fwChain), - "-m", "comment", "--comment", fmt.Sprintf(`"other traffic to %s will be dropped by KUBE-PROXY-FIREWALL"`, svcPortNameString), - ) - } - - // If Cluster policy is in use, create the chain and create rules jumping - // from clusterPolicyChain to the clusterEndpoints - if usesClusterPolicyChain { - proxier.natChains.Write(utiliptables.MakeChainLine(clusterPolicyChain)) - proxier.writeServiceToEndpointRules(svcPortNameString, svcInfo, clusterPolicyChain, clusterEndpoints, args) - } - - // If Local policy is in use, create the chain and create rules jumping - // from localPolicyChain to the localEndpoints - if usesLocalPolicyChain { - proxier.natChains.Write(utiliptables.MakeChainLine(localPolicyChain)) - proxier.writeServiceToEndpointRules(svcPortNameString, svcInfo, localPolicyChain, localEndpoints, args) - } - - // Generate the per-endpoint chains. - for _, ep := range allLocallyReachableEndpoints { - epInfo, ok := ep.(*endpointsInfo) - if !ok { - klog.ErrorS(nil, "Failed to cast endpointsInfo", "endpointsInfo", ep) - continue - } - - endpointChain := epInfo.ChainName - - // Create the endpoint chain - proxier.natChains.Write(utiliptables.MakeChainLine(endpointChain)) - activeNATChains[endpointChain] = true - - args = append(args[:0], "-A", string(endpointChain)) - args = proxier.appendServiceCommentLocked(args, svcPortNameString) - // Handle traffic that loops back to the originator with SNAT. - proxier.natRules.Write( - args, - "-s", epInfo.IP(), - "-j", string(kubeMarkMasqChain)) - // Update client-affinity lists. - if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP { - args = append(args, "-m", "recent", "--name", string(endpointChain), "--set") - } - // DNAT to final destination. - args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", epInfo.Endpoint) - proxier.natRules.Write(args) - } - } - - // Delete chains no longer in use. Since "iptables-save" can take several seconds - // to run on hosts with lots of iptables rules, we don't bother to do this on - // every sync in large clusters. (Stale chains will not be referenced by any - // active rules, so they're harmless other than taking up memory.) 
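// Illustrative rendering (hypothetical endpoint 10.244.1.5:8080 of
// "ns1/svc1:p80", TCP, ClientIP session affinity) of the per-endpoint chain
// written above:
//
//   -A KUBE-SEP-XXXXXXXXXXXXXXXX -m comment --comment ns1/svc1:p80 -s 10.244.1.5 -j KUBE-MARK-MASQ
//   -A KUBE-SEP-XXXXXXXXXXXXXXXX -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-XXXXXXXXXXXXXXXX --set -m tcp -p tcp -j DNAT --to-destination 10.244.1.5:8080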
- if !proxier.largeClusterMode || time.Since(proxier.lastIPTablesCleanup) > proxier.syncPeriod { - var existingNATChains map[utiliptables.Chain]struct{} - - proxier.iptablesData.Reset() - if err := proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData); err == nil { - existingNATChains = utiliptables.GetChainsFromTable(proxier.iptablesData.Bytes()) - - for chain := range existingNATChains { - if !activeNATChains[chain] { - chainString := string(chain) - if !isServiceChainName(chainString) { - // Ignore chains that aren't ours. - continue - } - // We must (as per iptables) write a chain-line - // for it, which has the nice effect of flushing - // the chain. Then we can remove the chain. - proxier.natChains.Write(utiliptables.MakeChainLine(chain)) - proxier.natRules.Write("-X", chainString) - } - } - proxier.lastIPTablesCleanup = time.Now() - } else { - klog.ErrorS(err, "Failed to execute iptables-save: stale chains will not be deleted") - } - } - - // Finally, tail-call to the nodePorts chain. This needs to be after all - // other service portal rules. - nodeAddresses, err := proxier.nodePortAddresses.GetNodeAddresses(proxier.networkInterfacer) - if err != nil { - klog.ErrorS(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses) - } - // nodeAddresses may contain dual-stack zero-CIDRs if proxier.nodePortAddresses is empty. - // Ensure nodeAddresses only contains the addresses for this proxier's IP family. - for addr := range nodeAddresses { - if utilproxy.IsZeroCIDR(addr) && isIPv6 == netutils.IsIPv6CIDRString(addr) { - // if any of the addresses is zero cidr of this IP family, non-zero IPs can be excluded. - nodeAddresses = sets.NewString(addr) - break - } - } - - for address := range nodeAddresses { - if utilproxy.IsZeroCIDR(address) { - destinations := []string{"-m", "addrtype", "--dst-type", "LOCAL"} - if isIPv6 { - // For IPv6, Regardless of the value of localhostNodePorts is true - // or false, we should disable access to the nodePort via localhost. Since it never works and always - // cause kernel warnings. - destinations = append(destinations, "!", "-d", "::1/128") - } - - if !proxier.localhostNodePorts && !isIPv6 { - // If set localhostNodePorts to "false"(route_localnet=0), We should generate iptables rules that - // disable NodePort services to be accessed via localhost. Since it doesn't work and causes - // the kernel to log warnings if anyone tries. - destinations = append(destinations, "!", "-d", "127.0.0.0/8") - } - - proxier.natRules.Write( - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`, - destinations, - "-j", string(kubeNodePortsChain)) - break - } - - // Ignore IP addresses with incorrect version - if isIPv6 && !netutils.IsIPv6String(address) || !isIPv6 && netutils.IsIPv6String(address) { - klog.ErrorS(nil, "IP has incorrect IP version", "IP", address) - continue - } - - // For ipv6, Regardless of the value of localhostNodePorts is true or false, we should disallow access - // to the nodePort via lookBack address. 
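// Illustrative sketch (assumption: mirrors the zero-CIDR constants in
// pkg/proxy/util): when the configured nodePortAddresses include the zero
// CIDR for this proxier's family, the nodeport-address handling here collapses
// them to a single "--dst-type LOCAL" rule instead of one rule per node IP.
package main

import "fmt"

func isZeroCIDR(cidr string) bool {
	return cidr == "0.0.0.0/0" || cidr == "::/0"
}

func main() {
	for _, c := range []string{"0.0.0.0/0", "10.0.0.0/8", "::/0"} {
		fmt.Println(c, isZeroCIDR(c))
	}
}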
- if isIPv6 && utilproxy.IsLoopBack(address) { - klog.ErrorS(nil, "disallow nodePort services to be accessed via ipv6 localhost address", "IP", address) - continue - } - - // For ipv4, When localhostNodePorts is set to false, Ignore ipv4 lookBack address - if !isIPv6 && utilproxy.IsLoopBack(address) && !proxier.localhostNodePorts { - klog.ErrorS(nil, "disallow nodePort services to be accessed via ipv4 localhost address", "IP", address) - continue - } - - // create nodeport rules for each IP one by one - proxier.natRules.Write( - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`, - "-d", address, - "-j", string(kubeNodePortsChain)) - } - - // Drop the packets in INVALID state, which would potentially cause - // unexpected connection reset. - // https://github.com/kubernetes/kubernetes/issues/74839 - proxier.filterRules.Write( - "-A", string(kubeForwardChain), - "-m", "conntrack", - "--ctstate", "INVALID", - "-j", "DROP", - ) - - // If the masqueradeMark has been added then we want to forward that same - // traffic, this allows NodePort traffic to be forwarded even if the default - // FORWARD policy is not accept. - proxier.filterRules.Write( - "-A", string(kubeForwardChain), - "-m", "comment", "--comment", `"kubernetes forwarding rules"`, - "-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark), - "-j", "ACCEPT", - ) - - // The following rule ensures the traffic after the initial packet accepted - // by the "kubernetes forwarding rules" rule above will be accepted. - proxier.filterRules.Write( - "-A", string(kubeForwardChain), - "-m", "comment", "--comment", `"kubernetes forwarding conntrack rule"`, - "-m", "conntrack", - "--ctstate", "RELATED,ESTABLISHED", - "-j", "ACCEPT", - ) - - metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(proxier.filterRules.Lines())) - metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(proxier.natRules.Lines())) - - // Sync rules. 
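// Illustrative shape (not part of the patch) of the buffer assembled below
// and handed to iptables-restore in a single transaction:
//
//   *filter
//   :KUBE-SERVICES - [0:0]
//   ... chain declarations and -A rules ...
//   COMMIT
//   *nat
//   :KUBE-POSTROUTING - [0:0]
//   ... chain declarations and -A rules ...
//   COMMIT
//
// NoFlushTables keeps chains that kube-proxy does not own untouched.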
- proxier.iptablesData.Reset() - proxier.iptablesData.WriteString("*filter\n") - proxier.iptablesData.Write(proxier.filterChains.Bytes()) - proxier.iptablesData.Write(proxier.filterRules.Bytes()) - proxier.iptablesData.WriteString("COMMIT\n") - proxier.iptablesData.WriteString("*nat\n") - proxier.iptablesData.Write(proxier.natChains.Bytes()) - proxier.iptablesData.Write(proxier.natRules.Bytes()) - proxier.iptablesData.WriteString("COMMIT\n") - - klog.V(2).InfoS("Reloading service iptables data", - "numServices", len(proxier.svcPortMap), - "numEndpoints", totalEndpoints, - "numFilterChains", proxier.filterChains.Lines(), - "numFilterRules", proxier.filterRules.Lines(), - "numNATChains", proxier.natChains.Lines(), - "numNATRules", proxier.natRules.Lines(), - ) - klog.V(9).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes()) - - // NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table - err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) - if err != nil { - if pErr, ok := err.(utiliptables.ParseError); ok { - lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3) - klog.ErrorS(pErr, "Failed to execute iptables-restore", "rules", lines) - } else { - klog.ErrorS(err, "Failed to execute iptables-restore") - } - metrics.IptablesRestoreFailuresTotal.Inc() - return - } - success = true - proxier.needFullSync = false - - for name, lastChangeTriggerTimes := range endpointUpdateResult.LastChangeTriggerTimes { - for _, lastChangeTriggerTime := range lastChangeTriggerTimes { - latency := metrics.SinceInSeconds(lastChangeTriggerTime) - metrics.NetworkProgrammingLatency.Observe(latency) - klog.V(4).InfoS("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency) - } - } - - metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal").Set(float64(serviceNoLocalEndpointsTotalInternal)) - metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external").Set(float64(serviceNoLocalEndpointsTotalExternal)) - if proxier.healthzServer != nil { - proxier.healthzServer.Updated() - } - metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() - - // Update service healthchecks. The endpoints list might include services that are - // not "OnlyLocal", but the services list will not, and the serviceHealthServer - // will just drop those endpoints. - if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil { - klog.ErrorS(err, "Error syncing healthcheck services") - } - if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil { - klog.ErrorS(err, "Error syncing healthcheck endpoints") - } - - // Finish housekeeping. - // Clear stale conntrack entries for UDP Services, this has to be done AFTER the iptables rules are programmed. - // TODO: these could be made more consistent. 
- klog.V(4).InfoS("Deleting conntrack stale entries for services", "IPs", conntrackCleanupServiceIPs.UnsortedList()) - for _, svcIP := range conntrackCleanupServiceIPs.UnsortedList() { - if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - klog.ErrorS(err, "Failed to delete stale service connections", "IP", svcIP) - } - } - klog.V(4).InfoS("Deleting conntrack stale entries for services", "nodePorts", conntrackCleanupServiceNodePorts.UnsortedList()) - for _, nodePort := range conntrackCleanupServiceNodePorts.UnsortedList() { - err := conntrack.ClearEntriesForPort(proxier.exec, nodePort, isIPv6, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to clear udp conntrack", "nodePort", nodePort) - } - } - klog.V(4).InfoS("Deleting stale endpoint connections", "endpoints", endpointUpdateResult.DeletedUDPEndpoints) - proxier.deleteUDPEndpointConnections(endpointUpdateResult.DeletedUDPEndpoints) -} - -func (proxier *Proxier) writeServiceToEndpointRules(svcPortNameString string, svcInfo proxy.ServicePort, svcChain utiliptables.Chain, endpoints []proxy.Endpoint, args []string) { - // First write session affinity rules, if applicable. - if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP { - for _, ep := range endpoints { - epInfo, ok := ep.(*endpointsInfo) - if !ok { - continue - } - comment := fmt.Sprintf(`"%s -> %s"`, svcPortNameString, epInfo.Endpoint) - - args = append(args[:0], - "-A", string(svcChain), - ) - args = proxier.appendServiceCommentLocked(args, comment) - args = append(args, - "-m", "recent", "--name", string(epInfo.ChainName), - "--rcheck", "--seconds", strconv.Itoa(svcInfo.StickyMaxAgeSeconds()), "--reap", - "-j", string(epInfo.ChainName), - ) - proxier.natRules.Write(args) - } - } - - // Now write loadbalancing rules. - numEndpoints := len(endpoints) - for i, ep := range endpoints { - epInfo, ok := ep.(*endpointsInfo) - if !ok { - continue - } - comment := fmt.Sprintf(`"%s -> %s"`, svcPortNameString, epInfo.Endpoint) - - args = append(args[:0], "-A", string(svcChain)) - args = proxier.appendServiceCommentLocked(args, comment) - if i < (numEndpoints - 1) { - // Each rule is a probabilistic match. - args = append(args, - "-m", "statistic", - "--mode", "random", - "--probability", proxier.probability(numEndpoints-i)) - } - // The final (or only if n == 1) rule is a guaranteed match. 
- proxier.natRules.Write(args, "-j", string(epInfo.ChainName)) - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/OWNERS index cbaad91602cb..b33dc0c340dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/OWNERS @@ -3,6 +3,7 @@ reviewers: - sig-network-reviewers - andrewsykim + - aroradaman - uablrek approvers: - sig-network-approvers diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go index 5cc92db57c09..5c873af52306 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - utilipvs "k8s.io/kubernetes/pkg/util/ipvs" + utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util" ) const ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go index ab0b9eaaa14d..cc173eae5c10 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go @@ -40,4 +40,9 @@ type NetLinkHandle interface { // Only the addresses of the current family are returned. // IPv6 link-local and loopback addresses are excluded. GetLocalAddresses(dev string) (sets.Set[string], error) + // GetAllLocalAddressesExcept returns all local addresses on the node, except from the passed dev. + // This is not the same as taking the diff between GetAllLocalAddresses and GetLocalAddresses + // since an address can be assigned to many interfaces. This problem was raised in + // https://github.com/kubernetes/kubernetes/issues/114815 + GetAllLocalAddressesExcept(dev string) (sets.Set[string], error) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go index f4d2368885d9..98c96bc70319 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go @@ -24,7 +24,8 @@ import ( "net" "k8s.io/apimachinery/pkg/util/sets" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" + "k8s.io/klog/v2" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" netutils "k8s.io/utils/net" "github.com/vishvananda/netlink" @@ -134,7 +135,7 @@ func (h *netlinkHandle) GetAllLocalAddresses() (sets.Set[string], error) { if err != nil { return nil, fmt.Errorf("Could not get addresses: %v", err) } - return utilproxy.AddressSet(h.isValidForSet, addr), nil + return proxyutil.AddressSet(h.isValidForSet, addr), nil } // GetLocalAddresses return all local addresses for an interface. 
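// Illustrative example (hypothetical addresses) of why the new
// GetAllLocalAddressesExcept cannot be replaced by a set difference: an
// address bound to both the IPVS dummy device and a real interface must
// still count as a node address.
//
//   GetAllLocalAddresses()                   = {10.0.0.2, 10.96.5.10}
//   GetLocalAddresses("kube-ipvs0")          = {10.96.5.10}
//   difference                               = {10.0.0.2}             // loses 10.96.5.10
//   GetAllLocalAddressesExcept("kube-ipvs0") = {10.0.0.2, 10.96.5.10} // keeps it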
@@ -149,7 +150,7 @@ func (h *netlinkHandle) GetLocalAddresses(dev string) (sets.Set[string], error) if err != nil { return nil, fmt.Errorf("Can't get addresses from %s: %v", ifi.Name, err) } - return utilproxy.AddressSet(h.isValidForSet, addr), nil + return proxyutil.AddressSet(h.isValidForSet, addr), nil } func (h *netlinkHandle) isValidForSet(ip net.IP) bool { @@ -164,3 +165,30 @@ func (h *netlinkHandle) isValidForSet(ip net.IP) bool { } return true } + +// GetAllLocalAddressesExcept return all local addresses on the node, +// except from the passed dev. This is not the same as to take the +// diff between GetAllLocalAddresses and GetLocalAddresses since an +// address can be assigned to many interfaces. This problem raised +// https://github.com/kubernetes/kubernetes/issues/114815 +func (h *netlinkHandle) GetAllLocalAddressesExcept(dev string) (sets.Set[string], error) { + ifaces, err := net.Interfaces() + if err != nil { + return nil, err + } + var addr []net.Addr + for _, iface := range ifaces { + if iface.Name == dev { + continue + } + ifadr, err := iface.Addrs() + if err != nil { + // This may happen if the interface was deleted. Ignore + // but log the error. + klog.ErrorS(err, "Reading addresses", "interface", iface.Name) + continue + } + addr = append(addr, ifadr...) + } + return proxyutil.AddressSet(h.isValidForSet, addr), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go index 31f3fb7406b2..1cb38d3fb8f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go @@ -71,6 +71,11 @@ func (h *netlinkHandle) GetLocalAddresses(dev string) (sets.Set[string], error) return nil, fmt.Errorf("netlink is not supported in this platform") } +// GetAllLocalAddressesExcept is part of interface. +func (h *netlinkHandle) GetAllLocalAddressesExcept(dev string) (sets.Set[string], error) { + return nil, fmt.Errorf("netlink is not supported in this platform") +} + // Must match the one in proxier_test.go func (h *netlinkHandle) isValidForSet(ip net.IP) bool { return false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index cf52b2fcdcee..aa672ed690dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -43,16 +43,16 @@ import ( "k8s.io/client-go/tools/events" utilsysctl "k8s.io/component-helpers/node/util/sysctl" "k8s.io/kubernetes/pkg/proxy" + "k8s.io/kubernetes/pkg/proxy/conntrack" "k8s.io/kubernetes/pkg/proxy/healthcheck" + utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util" "k8s.io/kubernetes/pkg/proxy/metaproxier" "k8s.io/kubernetes/pkg/proxy/metrics" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables" "k8s.io/kubernetes/pkg/util/async" - "k8s.io/kubernetes/pkg/util/conntrack" utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" - utilipvs "k8s.io/kubernetes/pkg/util/ipvs" ) const ( @@ -268,19 +268,19 @@ type Proxier struct { // that are significantly impacting performance. 
iptablesData *bytes.Buffer filterChainsData *bytes.Buffer - natChains utilproxy.LineBuffer - filterChains utilproxy.LineBuffer - natRules utilproxy.LineBuffer - filterRules utilproxy.LineBuffer + natChains proxyutil.LineBuffer + filterChains proxyutil.LineBuffer + natRules proxyutil.LineBuffer + filterRules proxyutil.LineBuffer // Added as a member to the struct to allow injection for testing. netlinkHandle NetLinkHandle // ipsetList is the list of ipsets that ipvs proxier used. ipsetList map[string]*IPSet // nodePortAddresses selects the interfaces where nodePort works. - nodePortAddresses *utilproxy.NodePortAddresses + nodePortAddresses *proxyutil.NodePortAddresses // networkInterfacer defines an interface for several net library functions. // Inject for test purpose. - networkInterfacer utilproxy.NetworkInterfacer + networkInterfacer proxyutil.NetworkInterfacer gracefuldeleteManager *GracefulTerminationManager // serviceNoLocalEndpointsInternal represents the set of services that couldn't be applied // due to the absence of local endpoints when the internal traffic policy is "Local". @@ -338,7 +338,7 @@ func NewProxier(ipFamily v1.IPFamily, } // Set the conntrack sysctl we need for - if err := utilproxy.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil { return nil, err } @@ -357,34 +357,34 @@ func NewProxier(ipFamily v1.IPFamily, klog.V(2).InfoS("Left as-is", "sysctl", sysctlConnReuse) } else { // Set the connection reuse mode - if err := utilproxy.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil { return nil, err } } // Set the expire_nodest_conn sysctl we need for - if err := utilproxy.EnsureSysctl(sysctl, sysctlExpireNoDestConn, 1); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlExpireNoDestConn, 1); err != nil { return nil, err } // Set the expire_quiescent_template sysctl we need for - if err := utilproxy.EnsureSysctl(sysctl, sysctlExpireQuiescentTemplate, 1); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlExpireQuiescentTemplate, 1); err != nil { return nil, err } // Set the ip_forward sysctl we need for - if err := utilproxy.EnsureSysctl(sysctl, sysctlForward, 1); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlForward, 1); err != nil { return nil, err } if strictARP { // Set the arp_ignore sysctl we need for - if err := utilproxy.EnsureSysctl(sysctl, sysctlArpIgnore, 1); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlArpIgnore, 1); err != nil { return nil, err } // Set the arp_announce sysctl we need for - if err := utilproxy.EnsureSysctl(sysctl, sysctlArpAnnounce, 2); err != nil { + if err := proxyutil.EnsureSysctl(sysctl, sysctlArpAnnounce, 2); err != nil { return nil, err } } @@ -409,7 +409,7 @@ func NewProxier(ipFamily v1.IPFamily, scheduler = defaultScheduler } - nodePortAddresses := utilproxy.NewNodePortAddresses(nodePortAddressStrings) + nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) @@ -440,14 +440,14 @@ func NewProxier(ipFamily v1.IPFamily, ipvsScheduler: scheduler, iptablesData: bytes.NewBuffer(nil), filterChainsData: bytes.NewBuffer(nil), - natChains: utilproxy.LineBuffer{}, - natRules: utilproxy.LineBuffer{}, - filterChains: utilproxy.LineBuffer{}, - filterRules: utilproxy.LineBuffer{}, + 
natChains: proxyutil.LineBuffer{}, + natRules: proxyutil.LineBuffer{}, + filterChains: proxyutil.LineBuffer{}, + filterRules: proxyutil.LineBuffer{}, netlinkHandle: NewNetLinkHandle(ipFamily == v1.IPv6Protocol), ipset: ipset, nodePortAddresses: nodePortAddresses, - networkInterfacer: utilproxy.RealNetwork{}, + networkInterfacer: proxyutil.RealNetwork{}, gracefuldeleteManager: NewGracefulTerminationManager(ipvs), } // initialize ipsetList with all sets we needed @@ -490,14 +490,12 @@ func NewDualStackProxier( safeIpset := newSafeIpset(ipset) - ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses) - // Create an ipv4 instance of the single-stack proxier ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], ipvs, safeIpset, sysctl, exec, syncPeriod, minSyncPeriod, filterCIDRs(false, excludeCIDRs), strictARP, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, localDetectors[0], hostname, nodeIP[0], - recorder, healthzServer, scheduler, ipFamilyMap[v1.IPv4Protocol], kernelHandler) + recorder, healthzServer, scheduler, nodePortAddresses, kernelHandler) if err != nil { return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) } @@ -506,7 +504,7 @@ func NewDualStackProxier( exec, syncPeriod, minSyncPeriod, filterCIDRs(true, excludeCIDRs), strictARP, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, localDetectors[1], hostname, nodeIP[1], - nil, nil, scheduler, ipFamilyMap[v1.IPv6Protocol], kernelHandler) + recorder, healthzServer, scheduler, nodePortAddresses, kernelHandler) if err != nil { return nil, fmt.Errorf("unable to create ipv6 proxier: %v", err) } @@ -946,29 +944,6 @@ func (proxier *Proxier) syncProxyRules() { serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges) endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) - // We need to detect stale connections to UDP Services so we - // can clean dangling conntrack entries that can blackhole traffic. - conntrackCleanupServiceIPs := serviceUpdateResult.DeletedUDPClusterIPs - conntrackCleanupServiceNodePorts := sets.NewInt() - // merge stale services gathered from updateEndpointsMap - // an UDP service that changes from 0 to non-0 endpoints is considered stale. 
- for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices { - if svcInfo, ok := proxier.svcPortMap[svcPortName]; ok { - klog.V(4).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName) - conntrackCleanupServiceIPs.Insert(svcInfo.ClusterIP().String()) - for _, extIP := range svcInfo.ExternalIPStrings() { - conntrackCleanupServiceIPs.Insert(extIP) - } - for _, lbIP := range svcInfo.LoadBalancerIPStrings() { - conntrackCleanupServiceIPs.Insert(lbIP) - } - nodePort := svcInfo.NodePort() - if svcInfo.Protocol() == v1.ProtocolUDP && nodePort != 0 { - conntrackCleanupServiceNodePorts.Insert(nodePort) - } - } - } - klog.V(3).InfoS("Syncing ipvs proxier rules") proxier.serviceNoLocalEndpointsInternal = sets.New[string]() @@ -1013,11 +988,10 @@ func (proxier *Proxier) syncProxyRules() { klog.ErrorS(err, "Error listing addresses binded to dummy interface") } // nodeAddressSet All addresses *except* those on the dummy interface - nodeAddressSet, err := proxier.netlinkHandle.GetAllLocalAddresses() + nodeAddressSet, err := proxier.netlinkHandle.GetAllLocalAddressesExcept(defaultDummyDevice) if err != nil { klog.ErrorS(err, "Error listing node addresses") } - nodeAddressSet = nodeAddressSet.Difference(alreadyBoundAddrs) hasNodePort := false for _, svc := range proxier.svcPortMap { @@ -1028,35 +1002,23 @@ func (proxier *Proxier) syncProxyRules() { } } - // Both nodeAddresses and nodeIPs can be reused for all nodePort services - // and only need to be computed if we have at least one nodePort service. - var ( - // List of node addresses to listen on if a nodePort is set. - nodeAddresses []string - // List of node IP addresses to be used as IPVS services if nodePort is set. - nodeIPs []net.IP - ) - + // List of node IP addresses to be used as IPVS services if nodePort is set. This + // can be reused for all nodePort services. 
+	var nodeIPs []net.IP
 	if hasNodePort {
-		nodeAddrSet, err := proxier.nodePortAddresses.GetNodeAddresses(proxier.networkInterfacer)
-		if err != nil {
-			klog.ErrorS(err, "Failed to get node IP address matching nodeport cidr")
+		if proxier.nodePortAddresses.MatchAll() {
+			for _, ipStr := range nodeAddressSet.UnsortedList() {
+				nodeIPs = append(nodeIPs, netutils.ParseIPSloppy(ipStr))
+			}
 		} else {
-			nodeAddresses = nodeAddrSet.List()
-			for _, address := range nodeAddresses {
-				a := netutils.ParseIPSloppy(address)
-				if a.IsLoopback() {
-					continue
-				}
-				if utilproxy.IsZeroCIDR(address) {
-					nodeIPs = nil
-					for _, ipStr := range nodeAddressSet.UnsortedList() {
-						nodeIPs = append(nodeIPs, netutils.ParseIPSloppy(ipStr))
+			allNodeIPs, err := proxier.nodePortAddresses.GetNodeIPs(proxier.networkInterfacer)
+			if err != nil {
+				klog.ErrorS(err, "Failed to get node IP address matching nodeport cidr")
+			} else {
+				for _, ip := range allNodeIPs {
+					if !ip.IsLoopback() {
+						nodeIPs = append(nodeIPs, ip)
 					}
-					break
-				}
-				if getIPFamily(a) == proxier.ipFamily {
-					nodeIPs = append(nodeIPs, a)
 				}
 			}
 		}
@@ -1193,9 +1155,13 @@ func (proxier *Proxier) syncProxyRules() {
 			if proxier.ipvsScheduler == "mh" {
 				serv.Flags |= utilipvs.FlagSourceHash
 			}
-			if err := proxier.syncService(svcPortNameString, serv, true, alreadyBoundAddrs); err == nil {
+			// We must not add the address to the dummy device if it exists on another interface
+			shouldBind := !nodeAddressSet.Has(serv.Address.String())
+			if err := proxier.syncService(svcPortNameString, serv, shouldBind, alreadyBoundAddrs); err == nil {
 				activeIPVSServices.Insert(serv.String())
-				activeBindAddrs.Insert(serv.Address.String())
+				if shouldBind {
+					activeBindAddrs.Insert(serv.Address.String())
+				}
 				if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil {
 					klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv)
 				}
@@ -1296,9 +1262,13 @@ func (proxier *Proxier) syncProxyRules() {
 			if proxier.ipvsScheduler == "mh" {
 				serv.Flags |= utilipvs.FlagSourceHash
 			}
-			if err := proxier.syncService(svcPortNameString, serv, true, alreadyBoundAddrs); err == nil {
+			// We must not add the address to the dummy device if it exists on another interface
+			shouldBind := !nodeAddressSet.Has(serv.Address.String())
+			if err := proxier.syncService(svcPortNameString, serv, shouldBind, alreadyBoundAddrs); err == nil {
 				activeIPVSServices.Insert(serv.String())
-				activeBindAddrs.Insert(serv.Address.String())
+				if shouldBind {
+					activeBindAddrs.Insert(serv.Address.String())
+				}
 				if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil {
 					klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv)
 				}
@@ -1308,7 +1278,7 @@ func (proxier *Proxier) syncProxyRules() {
 		if svcInfo.NodePort() != 0 {
-			if len(nodeAddresses) == 0 || len(nodeIPs) == 0 {
+			if len(nodeIPs) == 0 {
 				// Skip nodePort configuration since an error occurred when
 				// computing nodeAddresses or nodeIPs.
 				continue
@@ -1528,24 +1498,8 @@ func (proxier *Proxier) syncProxyRules() {
 	metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal").Set(float64(proxier.serviceNoLocalEndpointsInternal.Len()))
 	metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external").Set(float64(proxier.serviceNoLocalEndpointsExternal.Len()))

-	// Finish housekeeping.
-	// Clear stale conntrack entries for UDP Services, this has to be done AFTER the ipvs rules are programmed.
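For context on the rewritten nodePort block above: the proxier now checks `MatchAll()` first and, only in the CIDR-restricted case, walks the `GetNodeIPs()` result, discarding loopback addresses. A minimal, self-contained sketch of that loopback-filtering step (not part of the vendored diff; it assumes only `k8s.io/utils/net`, and the candidate address strings are invented for illustration):

```go
package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// Parse candidate node addresses leniently, as the proxier does with
	// netutils.ParseIPSloppy, and drop loopback entries before they are
	// used as IPVS nodePort service addresses.
	candidates := []string{"192.168.1.10", "127.0.0.1", "fd00::10", "::1"}
	var nodeIPs []string
	for _, s := range candidates {
		ip := netutils.ParseIPSloppy(s)
		if ip == nil || ip.IsLoopback() {
			continue
		}
		nodeIPs = append(nodeIPs, ip.String())
	}
	fmt.Println(nodeIPs) // [192.168.1.10 fd00::10]
}
```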
- // TODO: these could be made more consistent. - klog.V(4).InfoS("Deleting conntrack stale entries for services", "IPs", conntrackCleanupServiceIPs.UnsortedList()) - for _, svcIP := range conntrackCleanupServiceIPs.UnsortedList() { - if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - klog.ErrorS(err, "Failed to delete stale service connections", "IP", svcIP) - } - } - klog.V(4).InfoS("Deleting conntrack stale entries for services", "nodePorts", conntrackCleanupServiceNodePorts.UnsortedList()) - for _, nodePort := range conntrackCleanupServiceNodePorts.UnsortedList() { - err := conntrack.ClearEntriesForPort(proxier.exec, nodePort, proxier.ipFamily == v1.IPv6Protocol, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to clear udp conntrack", "nodePort", nodePort) - } - } - klog.V(4).InfoS("Deleting stale endpoint connections", "endpoints", endpointUpdateResult.DeletedUDPEndpoints) - proxier.deleteUDPEndpointConnections(endpointUpdateResult.DeletedUDPEndpoints) + // Finish housekeeping, clear stale conntrack entries for UDP Services + conntrack.CleanStaleEntries(proxier.ipFamily == v1.IPv6Protocol, proxier.exec, proxier.svcPortMap, serviceUpdateResult, endpointUpdateResult) } // writeIptablesRules write all iptables rules to proxier.natRules or proxier.FilterRules that ipvs proxier needed @@ -1726,6 +1680,9 @@ func (proxier *Proxier) writeIptablesRules() { proxier.filterRules.Write( "-A", string(kubeIPVSFilterChain), "-m", "set", "--match-set", proxier.ipsetList[kubeExternalIPSet].Name, "dst,dst", "-j", "RETURN") + proxier.filterRules.Write( + "-A", string(kubeIPVSFilterChain), + "-m", "set", "--match-set", proxier.ipsetList[kubeHealthCheckNodePortSet].Name, "dst", "-j", "RETURN") proxier.filterRules.Write( "-A", string(kubeIPVSFilterChain), "-m", "conntrack", "--ctstate", "NEW", @@ -1734,7 +1691,12 @@ func (proxier *Proxier) writeIptablesRules() { // Install the kubernetes-specific postrouting rules. We use a whole chain for // this so that it is easier to flush and change, for example if the mark // value should ever change. - // NB: THIS MUST MATCH the corresponding code in the kubelet + + // NOTE: kubelet creates identical copies of these rules. If you want to change + // these rules in the future, you MUST do so in a way that will interoperate + // correctly with skewed versions of the rules created by kubelet. (Remove this + // comment once IPTablesOwnershipCleanup is GA.) + proxier.natRules.Write( "-A", string(kubePostroutingChain), "-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark), @@ -1812,42 +1774,6 @@ func (proxier *Proxier) createAndLinkKubeChain() { } -// After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we -// risk sending more traffic to it, all of which will be lost (because UDP). 
-// This assumes the proxier mutex is held -// TODO: move it to util -func (proxier *Proxier) deleteUDPEndpointConnections(deletedUDPEndpoints []proxy.ServiceEndpoint) { - for _, epSvcPair := range deletedUDPEndpoints { - if svcInfo, ok := proxier.svcPortMap[epSvcPair.ServicePortName]; ok { - endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) - nodePort := svcInfo.NodePort() - var err error - if nodePort != 0 { - err = conntrack.ClearEntriesForPortNAT(proxier.exec, endpointIP, nodePort, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete nodeport-related endpoint connections", "servicePortName", epSvcPair.ServicePortName) - } - } - err = conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIP().String(), endpointIP, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete endpoint connections", "servicePortName", epSvcPair.ServicePortName) - } - for _, extIP := range svcInfo.ExternalIPStrings() { - err := conntrack.ClearEntriesForNAT(proxier.exec, extIP, endpointIP, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete endpoint connections for externalIP", "servicePortName", epSvcPair.ServicePortName, "externalIP", extIP) - } - } - for _, lbIP := range svcInfo.LoadBalancerIPStrings() { - err := conntrack.ClearEntriesForNAT(proxier.exec, lbIP, endpointIP, v1.ProtocolUDP) - if err != nil { - klog.ErrorS(err, "Failed to delete endpoint connections for LoadBalancerIP", "servicePortName", epSvcPair.ServicePortName, "loadBalancerIP", lbIP) - } - } - } - } -} - func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, bindAddr bool, alreadyBoundAddrs sets.Set[string]) error { appliedVirtualServer, _ := proxier.ipvs.GetVirtualServer(vs) if appliedVirtualServer == nil || !appliedVirtualServer.Equal(vs) { @@ -1899,7 +1825,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } // curEndpoints represents IPVS destinations listed from current system. 
- curEndpoints := sets.NewString() + curEndpoints := sets.New[string]() curDests, err := proxier.ipvs.GetRealServers(appliedVirtualServer) if err != nil { klog.ErrorS(err, "Failed to list IPVS destinations") @@ -1943,13 +1869,13 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } } - newEndpoints := sets.NewString() + newEndpoints := sets.New[string]() for _, epInfo := range endpoints { newEndpoints.Insert(epInfo.String()) } // Create new endpoints - for _, ep := range newEndpoints.List() { + for _, ep := range sets.List(newEndpoints) { ip, port, err := net.SplitHostPort(ep) if err != nil { klog.ErrorS(err, "Failed to parse endpoint", "endpoint", ep) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs_linux.go similarity index 99% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs_linux.go index d6b947415218..a783d1d428b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs_linux.go @@ -46,7 +46,7 @@ type Protocol uint16 func New() Interface { handle, err := libipvs.New("") if err != nil { - klog.Errorf("IPVS interface can't be initialized, error: %v", err) + klog.ErrorS(err, "IPVS interface can't be initialized") return nil } return &runner{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs_unsupported.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/util/ipvs_unsupported.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go index 27304214d996..b725584c8e70 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go @@ -27,7 +27,8 @@ import ( const kubeProxySubsystem = "kubeproxy" var ( - // SyncProxyRulesLatency is the latency of one round of kube-proxy syncing proxy rules. + // SyncProxyRulesLatency is the latency of one round of kube-proxy syncing proxy + // rules. (With the iptables proxy, this includes both full and partial syncs.) SyncProxyRulesLatency = metrics.NewHistogram( &metrics.HistogramOpts{ Subsystem: kubeProxySubsystem, @@ -38,6 +39,28 @@ var ( }, ) + // SyncFullProxyRulesLatency is the latency of one round of full rule syncing. + SyncFullProxyRulesLatency = metrics.NewHistogram( + &metrics.HistogramOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_full_proxy_rules_duration_seconds", + Help: "SyncProxyRules latency in seconds for full resyncs", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }, + ) + + // SyncPartialProxyRulesLatency is the latency of one round of partial rule syncing. 
+ SyncPartialProxyRulesLatency = metrics.NewHistogram( + &metrics.HistogramOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_partial_proxy_rules_duration_seconds", + Help: "SyncProxyRules latency in seconds for partial resyncs", + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + StabilityLevel: metrics.ALPHA, + }, + ) + // SyncProxyRulesLastTimestamp is the timestamp proxy rules were last // successfully synced. SyncProxyRulesLastTimestamp = metrics.NewGauge( @@ -180,6 +203,8 @@ var registerMetricsOnce sync.Once func RegisterMetrics() { registerMetricsOnce.Do(func() { legacyregistry.MustRegister(SyncProxyRulesLatency) + legacyregistry.MustRegister(SyncFullProxyRulesLatency) + legacyregistry.MustRegister(SyncPartialProxyRulesLatency) legacyregistry.MustRegister(SyncProxyRulesLastTimestamp) legacyregistry.MustRegister(NetworkProgrammingLatency) legacyregistry.MustRegister(EndpointChangesPending) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go index f2cbf6b1f2d6..1845818945a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/node.go @@ -33,6 +33,12 @@ type NodePodCIDRHandler struct { podCIDRs []string } +func NewNodePodCIDRHandler(podCIDRs []string) *NodePodCIDRHandler { + return &NodePodCIDRHandler{ + podCIDRs: podCIDRs, + } +} + var _ config.NodeHandler = &NodePodCIDRHandler{} // OnNodeAdd is a handler for Node creates. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go index ab8e6fa5d140..279191f57b25 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" apiservice "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/proxy/metrics" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" + proxyutil "k8s.io/kubernetes/pkg/proxy/util" ) // BaseServicePortInfo contains base information that defines a service. @@ -165,7 +165,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) } - clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service) + clusterIP := proxyutil.GetClusterIPByFamily(sct.ipFamily, service) info := &BaseServicePortInfo{ clusterIP: netutils.ParseIPSloppy(clusterIP), port: int(port.Port), @@ -194,21 +194,21 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic // services, this is actually expected. 
Hence we downgraded from reporting by events // to just log lines with high verbosity - ipFamilyMap := utilproxy.MapIPsByIPFamily(service.Spec.ExternalIPs) + ipFamilyMap := proxyutil.MapIPsByIPFamily(service.Spec.ExternalIPs) info.externalIPs = ipFamilyMap[sct.ipFamily] // Log the IPs not matching the ipFamily - if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(ips) > 0 { + if ips, ok := ipFamilyMap[proxyutil.OtherIPFamily(sct.ipFamily)]; ok && len(ips) > 0 { klog.V(4).InfoS("Service change tracker ignored the following external IPs for given service as they don't match IP Family", - "ipFamily", sct.ipFamily, "externalIPs", strings.Join(ips, ","), "service", klog.KObj(service)) + "ipFamily", sct.ipFamily, "externalIPs", strings.Join(ips, ", "), "service", klog.KObj(service)) } - ipFamilyMap = utilproxy.MapCIDRsByIPFamily(loadBalancerSourceRanges) + ipFamilyMap = proxyutil.MapCIDRsByIPFamily(loadBalancerSourceRanges) info.loadBalancerSourceRanges = ipFamilyMap[sct.ipFamily] // Log the CIDRs not matching the ipFamily - if cidrs, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(cidrs) > 0 { + if cidrs, ok := ipFamilyMap[proxyutil.OtherIPFamily(sct.ipFamily)]; ok && len(cidrs) > 0 { klog.V(4).InfoS("Service change tracker ignored the following load balancer source ranges for given Service as they don't match IP Family", - "ipFamily", sct.ipFamily, "loadBalancerSourceRanges", strings.Join(cidrs, ","), "service", klog.KObj(service)) + "ipFamily", sct.ipFamily, "loadBalancerSourceRanges", strings.Join(cidrs, ", "), "service", klog.KObj(service)) } // Obtain Load Balancer Ingress IPs @@ -220,11 +220,11 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic } if len(ips) > 0 { - ipFamilyMap = utilproxy.MapIPsByIPFamily(ips) + ipFamilyMap = proxyutil.MapIPsByIPFamily(ips) - if ipList, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(ipList) > 0 { + if ipList, ok := ipFamilyMap[proxyutil.OtherIPFamily(sct.ipFamily)]; ok && len(ipList) > 0 { klog.V(4).InfoS("Service change tracker ignored the following load balancer ingress IPs for given Service as they don't match the IP Family", - "ipFamily", sct.ipFamily, "loadBalancerIngressIps", strings.Join(ipList, ","), "service", klog.KObj(service)) + "ipFamily", sct.ipFamily, "loadBalancerIngressIps", strings.Join(ipList, ", "), "service", klog.KObj(service)) } // Create the LoadBalancerStatus with the filtered IPs for _, ip := range ipFamilyMap[sct.ipFamily] { @@ -330,11 +330,11 @@ func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool { // PendingChanges returns a set whose keys are the names of the services that have changed // since the last time sct was used to update a ServiceMap. (You must call this _before_ // calling sm.Update(sct).) -func (sct *ServiceChangeTracker) PendingChanges() sets.String { +func (sct *ServiceChangeTracker) PendingChanges() sets.Set[string] { sct.lock.Lock() defer sct.lock.Unlock() - changes := sets.NewString() + changes := sets.New[string]() for name := range sct.items { changes.Insert(name.String()) } @@ -346,12 +346,12 @@ type UpdateServiceMapResult struct { // DeletedUDPClusterIPs holds stale (no longer assigned to a Service) Service IPs // that had UDP ports. Callers can use this to abort timeout-waits or clear // connection-tracking information. - DeletedUDPClusterIPs sets.String + DeletedUDPClusterIPs sets.Set[string] } // Update updates ServicePortMap base on the given changes. 
func (sm ServicePortMap) Update(changes *ServiceChangeTracker) (result UpdateServiceMapResult) { - result.DeletedUDPClusterIPs = sets.NewString() + result.DeletedUDPClusterIPs = sets.New[string]() sm.apply(changes, result.DeletedUDPClusterIPs) return result } @@ -381,11 +381,11 @@ func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) Servic return nil } - if utilproxy.ShouldSkipService(service) { + if proxyutil.ShouldSkipService(service) { return nil } - clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service) + clusterIP := proxyutil.GetClusterIPByFamily(sct.ipFamily, service) if clusterIP == "" { return nil } @@ -407,7 +407,7 @@ func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) Servic // apply the changes to ServicePortMap and update the deleted UDP cluster IP set. // apply triggers processServiceMapChange on every change. -func (sm *ServicePortMap) apply(changes *ServiceChangeTracker, deletedUDPClusterIPs sets.String) { +func (sm *ServicePortMap) apply(changes *ServiceChangeTracker, deletedUDPClusterIPs sets.Set[string]) { changes.lock.Lock() defer changes.lock.Unlock() for _, change := range changes.items { @@ -446,9 +446,9 @@ func (sm *ServicePortMap) apply(changes *ServiceChangeTracker, deletedUDPCluster // B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} // A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}} // produce string set {"ns/cluster-ip:http"} -func (sm *ServicePortMap) merge(other ServicePortMap) sets.String { +func (sm *ServicePortMap) merge(other ServicePortMap) sets.Set[string] { // existingPorts is going to store all identifiers of all services in `other` ServicePortMap. - existingPorts := sets.NewString() + existingPorts := sets.New[string]() for svcPortName, info := range other { // Take ServicePortName.String() as the newly merged service's identifier and put it into existingPorts. existingPorts.Insert(svcPortName.String()) @@ -475,7 +475,7 @@ func (sm *ServicePortMap) filter(other ServicePortMap) { // unmerge deletes all other ServicePortMap's elements from current ServicePortMap and // updates deletedUDPClusterIPs with all of the newly-deleted UDP cluster IPs. -func (sm *ServicePortMap) unmerge(other ServicePortMap, deletedUDPClusterIPs sets.String) { +func (sm *ServicePortMap) unmerge(other ServicePortMap, deletedUDPClusterIPs sets.Set[string]) { for svcPortName := range other { info, exists := (*sm)[svcPortName] if exists { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go index e8248a93a7e7..52f539b659fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/topology.go @@ -55,7 +55,7 @@ func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels m // if there are 0 cluster-wide endpoints, we can try to fallback to any terminating endpoints that are ready. // When falling back to terminating endpoints, we do NOT consider topology aware routing since this is a best // effort attempt to avoid dropping connections. 
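The `sets.String` to `sets.Set[string]` migration threaded through the `service.go` hunks above (and through the `syncEndpoint` changes earlier in this diff) moves from the deprecated string-set helpers to the generic API in `k8s.io/apimachinery/pkg/util/sets`. A minimal sketch of the replacement calls, assuming an apimachinery version with generic sets (v0.26 or later); the IP values are placeholders:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.New[string] replaces sets.NewString; Insert and Has keep
	// their names on the generic Set[string] type.
	deletedUDPClusterIPs := sets.New[string]("10.96.0.10")
	deletedUDPClusterIPs.Insert("10.96.0.11")
	fmt.Println(deletedUDPClusterIPs.Has("10.96.0.10")) // true

	// Sorted listing moves from the s.List() method to the package-level
	// generic helper sets.List(s), as in the syncEndpoint change above.
	fmt.Println(sets.List(deletedUDPClusterIPs)) // [10.96.0.10 10.96.0.11]
}
```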
- if len(clusterEndpoints) == 0 && utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) { + if len(clusterEndpoints) == 0 { clusterEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool { if ep.IsServing() && ep.IsTerminating() { return true @@ -87,7 +87,7 @@ func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels m if ep.GetIsLocal() { hasLocalReadyEndpoints = true } - } else if ep.IsServing() && ep.IsTerminating() && utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) { + } else if ep.IsServing() && ep.IsTerminating() { hasAnyEndpoints = true if ep.GetIsLocal() { hasLocalServingTerminatingEndpoints = true diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go index 1b6b843a8c14..7c9d19ab8183 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/types.go @@ -126,7 +126,7 @@ type Endpoint interface { IsTerminating() bool // GetZoneHints returns the zone hint for the endpoint. This is based on // endpoint.hints.forZones[0].name in the EndpointSlice API. - GetZoneHints() sets.String + GetZoneHints() sets.Set[string] // IP returns IP part of the endpoint. IP() string // Port returns the Port part of the endpoint. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go index aebe5f0718cc..c5332a079586 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go @@ -20,7 +20,7 @@ import ( "fmt" "net" - "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/api/core/v1" netutils "k8s.io/utils/net" ) @@ -30,32 +30,52 @@ type NodePortAddresses struct { cidrs []*net.IPNet containsIPv4Loopback bool + matchAll bool } // RFC 5735 127.0.0.0/8 - This block is assigned for use as the Internet host loopback address var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0) -// NewNodePortAddresses takes the `--nodeport-addresses` value (which is assumed to -// contain only valid CIDRs) and returns a NodePortAddresses object. If cidrStrings is -// empty, this is treated as `["0.0.0.0/0", "::/0"]`. -func NewNodePortAddresses(cidrStrings []string) *NodePortAddresses { - if len(cidrStrings) == 0 { - cidrStrings = []string{IPv4ZeroCIDR, IPv6ZeroCIDR} +// NewNodePortAddresses takes an IP family and the `--nodeport-addresses` value (which is +// assumed to contain only valid CIDRs, potentially of both IP families) and returns a +// NodePortAddresses object for the given family. If there are no CIDRs of the given +// family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are CIDRs of +// the other family). 
+func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
+	npa := &NodePortAddresses{}
+
+	// Filter CIDRs to correct family
+	for _, str := range cidrStrings {
+		if (family == v1.IPv4Protocol) == netutils.IsIPv4CIDRString(str) {
+			npa.cidrStrings = append(npa.cidrStrings, str)
+		}
 	}
-
-	npa := &NodePortAddresses{
-		cidrStrings: cidrStrings,
+	if len(npa.cidrStrings) == 0 {
+		if family == v1.IPv4Protocol {
+			npa.cidrStrings = []string{IPv4ZeroCIDR}
+		} else {
+			npa.cidrStrings = []string{IPv6ZeroCIDR}
+		}
 	}

+	// Now parse
 	for _, str := range npa.cidrStrings {
 		_, cidr, _ := netutils.ParseCIDRSloppy(str)
-		npa.cidrs = append(npa.cidrs, cidr)
 		if netutils.IsIPv4CIDR(cidr) {
 			if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) {
 				npa.containsIPv4Loopback = true
 			}
 		}
+
+		if IsZeroCIDR(str) {
+			// Ignore everything else
+			npa.cidrs = []*net.IPNet{cidr}
+			npa.matchAll = true
+			break
+		}
+
+		npa.cidrs = append(npa.cidrs, cidr)
 	}

 	return npa
@@ -65,32 +85,23 @@ func (npa *NodePortAddresses) String() string {
 	return fmt.Sprintf("%v", npa.cidrStrings)
 }

-// GetNodeAddresses return all matched node IP addresses for npa's CIDRs.
-// If npa's CIDRs include "0.0.0.0/0" and/or "::/0", then those values will be returned
-// verbatim in the response and no actual IPs of that family will be returned.
-// If no matching IPs are found, GetNodeAddresses will return an error.
-// NetworkInterfacer is injected for test purpose.
-func (npa *NodePortAddresses) GetNodeAddresses(nw NetworkInterfacer) (sets.String, error) {
-	uniqueAddressList := sets.NewString()
-
-	// First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs.
-	for _, cidr := range npa.cidrStrings {
-		if IsZeroCIDR(cidr) {
-			uniqueAddressList.Insert(cidr)
-		}
-	}
+// MatchAll returns true if npa matches all node IPs (of npa's given family).
+func (npa *NodePortAddresses) MatchAll() bool {
+	return npa.matchAll
+}
+
+// GetNodeIPs returns all matched node IP addresses for npa's CIDRs. If no matching
+// IPs are found, it returns an empty list.
+// NetworkInterfacer is injected for test purposes.
+func (npa *NodePortAddresses) GetNodeIPs(nw NetworkInterfacer) ([]net.IP, error) {
 	addrs, err := nw.InterfaceAddrs()
 	if err != nil {
 		return nil, fmt.Errorf("error listing all interfaceAddrs from host, error: %v", err)
 	}

-	// Second round of iteration to parse IPs based on cidr.
+	// Use a map to dedup matches
+	addresses := make(map[string]net.IP)
 	for _, cidr := range npa.cidrs {
-		if IsZeroCIDR(cidr.String()) {
-			continue
-		}
-
 		for _, addr := range addrs {
 			var ip net.IP
 			// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux.
@@ -104,21 +115,17 @@ func (npa *NodePortAddresses) GetNodeAddresses(nw NetworkInterfacer) (sets.Strin
 			}

 			if cidr.Contains(ip) {
-				if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
-					uniqueAddressList.Insert(ip.String())
-				}
-				if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
-					uniqueAddressList.Insert(ip.String())
-				}
+				addresses[ip.String()] = ip
 			}
 		}
 	}

-	if uniqueAddressList.Len() == 0 {
-		return nil, fmt.Errorf("no addresses found for cidrs %v", npa.cidrStrings)
+	ips := make([]net.IP, 0, len(addresses))
+	for _, ip := range addresses {
+		ips = append(ips, ip)
 	}

-	return uniqueAddressList, nil
+	return ips, nil
 }

 // ContainsIPv4Loopback returns true if npa's CIDRs contain an IPv4 loopback address.
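To make the per-family behavior of the reworked constructor concrete before the diff moves on to `util/utils.go`: a minimal sketch, assuming the vendored `k8s.io/kubernetes/pkg/proxy/util` package is importable as `proxyutil` and using invented CIDR values:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

func main() {
	// CIDRs of the other family are filtered out up front.
	npa := proxyutil.NewNodePortAddresses(v1.IPv4Protocol,
		[]string{"10.20.30.0/24", "fd00:1234::/64"})
	fmt.Println(npa.String())   // [10.20.30.0/24]
	fmt.Println(npa.MatchAll()) // false

	// With no IPv4 CIDRs left after filtering, the IPv4 zero CIDR is
	// substituted, so the object matches all node IPs of its family.
	npa = proxyutil.NewNodePortAddresses(v1.IPv4Protocol,
		[]string{"fd00:1234::/64"})
	fmt.Println(npa.String())   // [0.0.0.0/0]
	fmt.Println(npa.MatchAll()) // true
}
```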
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index 319daf27b4a0..44140e9fd97a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -483,6 +483,11 @@ func (buf *LineBuffer) Bytes() []byte { return buf.b.Bytes() } +// String returns the contents of buf as a string +func (buf *LineBuffer) String() string { + return buf.b.String() +} + // Lines returns the number of lines in buf. Note that more precisely, this returns the // number of times Write() or WriteBytes() was called; it assumes that you never wrote // any newlines to the buffer yourself. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS deleted file mode 100644 index eec557c644c2..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS +++ /dev/null @@ -1,17 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - feiskyer - - sbangari - - daschott -reviewers: - - feiskyer - - sbangari - - daschott -labels: - - sig/network -emeritus_approvers: - - dineshgovindasamy - - ksubrmnn - - kumarvin123 - - madhanrm diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go deleted file mode 100644 index 1f31c94e6368..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/hns.go +++ /dev/null @@ -1,451 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winkernel - -import ( - "crypto/sha1" - "encoding/json" - "fmt" - - "github.com/Microsoft/hcsshim/hcn" - "k8s.io/klog/v2" - - "strings" -) - -type HostNetworkService interface { - getNetworkByName(name string) (*hnsNetworkInfo, error) - getAllEndpointsByNetwork(networkName string) (map[string]*endpointsInfo, error) - getEndpointByID(id string) (*endpointsInfo, error) - getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) - getEndpointByName(id string) (*endpointsInfo, error) - createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) - deleteEndpoint(hnsID string) error - getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16, previousLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) (*loadBalancerInfo, error) - getAllLoadBalancers() (map[loadBalancerIdentifier]*loadBalancerInfo, error) - deleteLoadBalancer(hnsID string) error -} - -type hns struct{} - -var ( - // LoadBalancerFlagsIPv6 enables IPV6. - LoadBalancerFlagsIPv6 hcn.LoadBalancerFlags = 2 - // LoadBalancerPortMappingFlagsVipExternalIP enables VipExternalIP. 
- LoadBalancerPortMappingFlagsVipExternalIP hcn.LoadBalancerPortMappingFlags = 16 -) - -func (hns hns) getNetworkByName(name string) (*hnsNetworkInfo, error) { - hnsnetwork, err := hcn.GetNetworkByName(name) - if err != nil { - klog.ErrorS(err, "Error getting network by name") - return nil, err - } - - var remoteSubnets []*remoteSubnetInfo - for _, policy := range hnsnetwork.Policies { - if policy.Type == hcn.RemoteSubnetRoute { - policySettings := hcn.RemoteSubnetRoutePolicySetting{} - err = json.Unmarshal(policy.Settings, &policySettings) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal Remote Subnet policy settings") - } - rs := &remoteSubnetInfo{ - destinationPrefix: policySettings.DestinationPrefix, - isolationID: policySettings.IsolationId, - providerAddress: policySettings.ProviderAddress, - drMacAddress: policySettings.DistributedRouterMacAddress, - } - remoteSubnets = append(remoteSubnets, rs) - } - } - - return &hnsNetworkInfo{ - id: hnsnetwork.Id, - name: hnsnetwork.Name, - networkType: string(hnsnetwork.Type), - remoteSubnets: remoteSubnets, - }, nil -} - -func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpointsInfo), error) { - hcnnetwork, err := hcn.GetNetworkByName(networkName) - if err != nil { - klog.ErrorS(err, "failed to get HNS network by name", "name", networkName) - return nil, err - } - endpoints, err := hcn.ListEndpointsOfNetwork(hcnnetwork.Id) - if err != nil { - return nil, fmt.Errorf("failed to list endpoints: %w", err) - } - endpointInfos := make(map[string]*(endpointsInfo)) - for _, ep := range endpoints { - // Add to map with key endpoint ID or IP address - // Storing this is expensive in terms of memory, however there is a bug in Windows Server 2019 that can cause two endpoints to be created with the same IP address. - // TODO: Store by IP only and remove any lookups by endpoint ID. 
- endpointInfos[ep.Id] = &endpointsInfo{ - ip: ep.IpConfigurations[0].IpAddress, - isLocal: uint32(ep.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, - macAddress: ep.MacAddress, - hnsID: ep.Id, - hns: hns, - // only ready and not terminating endpoints were added to HNS - ready: true, - serving: true, - terminating: false, - } - endpointInfos[ep.IpConfigurations[0].IpAddress] = endpointInfos[ep.Id] - - if len(ep.IpConfigurations) == 1 { - continue - } - - // If ipFamilyPolicy is RequireDualStack or PreferDualStack, then there will be 2 IPS (iPV4 and IPV6) - // in the endpoint list - endpointDualstack := &endpointsInfo{ - ip: ep.IpConfigurations[1].IpAddress, - isLocal: uint32(ep.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, - macAddress: ep.MacAddress, - hnsID: ep.Id, - hns: hns, - // only ready and not terminating endpoints were added to HNS - ready: true, - serving: true, - terminating: false, - } - endpointInfos[ep.IpConfigurations[1].IpAddress] = endpointDualstack - } - klog.V(3).InfoS("Queried endpoints from network", "network", networkName) - klog.V(5).InfoS("Queried endpoints details", "network", networkName, "endpointInfos", endpointInfos) - return endpointInfos, nil -} - -func (hns hns) getEndpointByID(id string) (*endpointsInfo, error) { - hnsendpoint, err := hcn.GetEndpointByID(id) - if err != nil { - return nil, err - } - return &endpointsInfo{ //TODO: fill out PA - ip: hnsendpoint.IpConfigurations[0].IpAddress, - isLocal: uint32(hnsendpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote - macAddress: hnsendpoint.MacAddress, - hnsID: hnsendpoint.Id, - hns: hns, - }, nil -} -func (hns hns) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) { - hnsnetwork, err := hcn.GetNetworkByName(networkName) - if err != nil { - klog.ErrorS(err, "Error getting network by name") - return nil, err - } - - endpoints, err := hcn.ListEndpoints() - if err != nil { - return nil, fmt.Errorf("failed to list endpoints: %w", err) - } - for _, endpoint := range endpoints { - equal := false - if endpoint.IpConfigurations != nil && len(endpoint.IpConfigurations) > 0 { - equal = endpoint.IpConfigurations[0].IpAddress == ip - - if !equal && len(endpoint.IpConfigurations) > 1 { - equal = endpoint.IpConfigurations[1].IpAddress == ip - } - } - if equal && strings.EqualFold(endpoint.HostComputeNetwork, hnsnetwork.Id) { - return &endpointsInfo{ - ip: ip, - isLocal: uint32(endpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote - macAddress: endpoint.MacAddress, - hnsID: endpoint.Id, - hns: hns, - }, nil - } - } - return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName) -} -func (hns hns) getEndpointByName(name string) (*endpointsInfo, error) { - hnsendpoint, err := hcn.GetEndpointByName(name) - if err != nil { - return nil, err - } - return &endpointsInfo{ //TODO: fill out PA - ip: hnsendpoint.IpConfigurations[0].IpAddress, - isLocal: uint32(hnsendpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote - macAddress: hnsendpoint.MacAddress, - hnsID: hnsendpoint.Id, - hns: hns, - }, nil -} -func (hns hns) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) { - hnsNetwork, err := hcn.GetNetworkByName(networkName) - if err != nil { - return nil, err - } - var flags hcn.EndpointFlags - if !ep.isLocal { - flags |= hcn.EndpointFlagsRemoteEndpoint - } - ipConfig := &hcn.IpConfig{ - IpAddress: ep.ip, - } - hnsEndpoint := &hcn.HostComputeEndpoint{ - 
IpConfigurations: []hcn.IpConfig{*ipConfig}, - MacAddress: ep.macAddress, - Flags: flags, - SchemaVersion: hcn.SchemaVersion{ - Major: 2, - Minor: 0, - }, - } - - var createdEndpoint *hcn.HostComputeEndpoint - if !ep.isLocal { - if len(ep.providerAddress) != 0 { - policySettings := hcn.ProviderAddressEndpointPolicySetting{ - ProviderAddress: ep.providerAddress, - } - policySettingsJson, err := json.Marshal(policySettings) - if err != nil { - return nil, fmt.Errorf("PA Policy creation failed: %v", err) - } - paPolicy := hcn.EndpointPolicy{ - Type: hcn.NetworkProviderAddress, - Settings: policySettingsJson, - } - hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy) - } - createdEndpoint, err = hnsNetwork.CreateRemoteEndpoint(hnsEndpoint) - if err != nil { - return nil, err - } - } else { - createdEndpoint, err = hnsNetwork.CreateEndpoint(hnsEndpoint) - if err != nil { - return nil, err - } - } - return &endpointsInfo{ - ip: createdEndpoint.IpConfigurations[0].IpAddress, - isLocal: uint32(createdEndpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, - macAddress: createdEndpoint.MacAddress, - hnsID: createdEndpoint.Id, - providerAddress: ep.providerAddress, //TODO get from createdEndpoint - hns: hns, - }, nil -} -func (hns hns) deleteEndpoint(hnsID string) error { - hnsendpoint, err := hcn.GetEndpointByID(hnsID) - if err != nil { - return err - } - err = hnsendpoint.Delete() - if err == nil { - klog.V(3).InfoS("Remote endpoint resource deleted", "hnsID", hnsID) - } - return err -} - -// findLoadBalancerID will construct a id from the provided loadbalancer fields -func findLoadBalancerID(endpoints []endpointsInfo, vip string, protocol, internalPort, externalPort uint16) (loadBalancerIdentifier, error) { - // Compute hash from backends (endpoint IDs) - hash, err := hashEndpoints(endpoints) - if err != nil { - klog.V(2).ErrorS(err, "Error hashing endpoints", "endpoints", endpoints) - return loadBalancerIdentifier{}, err - } - if len(vip) > 0 { - return loadBalancerIdentifier{protocol: protocol, internalPort: internalPort, externalPort: externalPort, vip: vip, endpointsHash: hash}, nil - } - return loadBalancerIdentifier{protocol: protocol, internalPort: internalPort, externalPort: externalPort, endpointsHash: hash}, nil -} - -func (hns hns) getAllLoadBalancers() (map[loadBalancerIdentifier]*loadBalancerInfo, error) { - lbs, err := hcn.ListLoadBalancers() - var id loadBalancerIdentifier - if err != nil { - return nil, err - } - loadBalancers := make(map[loadBalancerIdentifier]*(loadBalancerInfo)) - for _, lb := range lbs { - portMap := lb.PortMappings[0] - // Compute hash from backends (endpoint IDs) - hash, err := hashEndpoints(lb.HostComputeEndpoints) - if err != nil { - klog.V(2).ErrorS(err, "Error hashing endpoints", "policy", lb) - return nil, err - } - if len(lb.FrontendVIPs) == 0 { - // Leave VIP uninitialized - id = loadBalancerIdentifier{protocol: uint16(portMap.Protocol), internalPort: portMap.InternalPort, externalPort: portMap.ExternalPort, endpointsHash: hash} - } else { - id = loadBalancerIdentifier{protocol: uint16(portMap.Protocol), internalPort: portMap.InternalPort, externalPort: portMap.ExternalPort, vip: lb.FrontendVIPs[0], endpointsHash: hash} - } - loadBalancers[id] = &loadBalancerInfo{ - hnsID: lb.Id, - } - } - klog.V(3).InfoS("Queried load balancers", "count", len(lbs)) - return loadBalancers, nil -} - -func (hns hns) getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort 
uint16, previousLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) (*loadBalancerInfo, error) { - var id loadBalancerIdentifier - vips := []string{} - // Compute hash from backends (endpoint IDs) - hash, err := hashEndpoints(endpoints) - if err != nil { - klog.V(2).ErrorS(err, "Error hashing endpoints", "endpoints", endpoints) - return nil, err - } - if len(vip) > 0 { - id = loadBalancerIdentifier{protocol: protocol, internalPort: internalPort, externalPort: externalPort, vip: vip, endpointsHash: hash} - vips = append(vips, vip) - } else { - id = loadBalancerIdentifier{protocol: protocol, internalPort: internalPort, externalPort: externalPort, endpointsHash: hash} - } - - if lb, found := previousLoadBalancers[id]; found { - klog.V(1).InfoS("Found cached Hns loadbalancer policy resource", "policies", lb) - return lb, nil - } - - lbPortMappingFlags := hcn.LoadBalancerPortMappingFlagsNone - if flags.isILB { - lbPortMappingFlags |= hcn.LoadBalancerPortMappingFlagsILB - } - if flags.useMUX { - lbPortMappingFlags |= hcn.LoadBalancerPortMappingFlagsUseMux - } - if flags.preserveDIP { - lbPortMappingFlags |= hcn.LoadBalancerPortMappingFlagsPreserveDIP - } - if flags.localRoutedVIP { - lbPortMappingFlags |= hcn.LoadBalancerPortMappingFlagsLocalRoutedVIP - } - if flags.isVipExternalIP { - lbPortMappingFlags |= LoadBalancerPortMappingFlagsVipExternalIP - } - - lbFlags := hcn.LoadBalancerFlagsNone - if flags.isDSR { - lbFlags |= hcn.LoadBalancerFlagsDSR - } - - if flags.isIPv6 { - lbFlags |= LoadBalancerFlagsIPv6 - } - - lbDistributionType := hcn.LoadBalancerDistributionNone - - if flags.sessionAffinity { - lbDistributionType = hcn.LoadBalancerDistributionSourceIP - } - - loadBalancer := &hcn.HostComputeLoadBalancer{ - SourceVIP: sourceVip, - PortMappings: []hcn.LoadBalancerPortMapping{ - { - Protocol: uint32(protocol), - InternalPort: internalPort, - ExternalPort: externalPort, - DistributionType: lbDistributionType, - Flags: lbPortMappingFlags, - }, - }, - FrontendVIPs: vips, - SchemaVersion: hcn.SchemaVersion{ - Major: 2, - Minor: 0, - }, - Flags: lbFlags, - } - - for _, ep := range endpoints { - loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, ep.hnsID) - } - - lb, err := loadBalancer.Create() - - if err != nil { - return nil, err - } - - klog.V(1).InfoS("Created Hns loadbalancer policy resource", "loadBalancer", lb) - lbInfo := &loadBalancerInfo{ - hnsID: lb.Id, - } - // Add to map of load balancers - previousLoadBalancers[id] = lbInfo - return lbInfo, err -} - -func (hns hns) deleteLoadBalancer(hnsID string) error { - lb, err := hcn.GetLoadBalancerByID(hnsID) - if err != nil { - // Return silently - return nil - } - - err = lb.Delete() - if err != nil { - // There is a bug in Windows Server 2019, that can cause the delete call to fail sometimes. We retry one more time. - // TODO: The logic in syncProxyRules should be rewritten in the future to better stage and handle a call like this failing using the policyApplied fields. - klog.V(1).ErrorS(err, "Error deleting Hns loadbalancer policy resource. Attempting one more time...", "loadBalancer", lb) - return lb.Delete() - } - return err -} - -// Calculates a hash from the given endpoint IDs. -func hashEndpoints[T string | endpointsInfo](endpoints []T) (hash [20]byte, err error) { - var id string - // Recover in case something goes wrong. Return error and null byte array. 
- defer func() { - if r := recover(); r != nil { - err = r.(error) - hash = [20]byte{} - } - }() - - // Iterate over endpoints, compute hash - for _, ep := range endpoints { - switch x := any(ep).(type) { - case endpointsInfo: - id = strings.ToUpper(x.hnsID) - case string: - id = x - } - if len(id) > 0 { - // We XOR the hashes of endpoints, since they are an unordered set. - // This can cause collisions, but is sufficient since we are using other keys to identify the load balancer. - hash = xor(hash, sha1.Sum(([]byte(id)))) - } - } - return -} - -func xor(b1 [20]byte, b2 [20]byte) (xorbytes [20]byte) { - for i := 0; i < 20; i++ { - xorbytes[i] = b1[i] ^ b2[i] - } - return xorbytes -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go deleted file mode 100644 index e4357e4eceb0..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winkernel - -import ( - "sync" - - "k8s.io/component-base/metrics/legacyregistry" - "k8s.io/kubernetes/pkg/proxy/metrics" -) - -var registerMetricsOnce sync.Once - -// RegisterMetrics registers kube-proxy metrics for Windows modes. -func RegisterMetrics() { - registerMetricsOnce.Do(func() { - legacyregistry.MustRegister(metrics.SyncProxyRulesLatency) - legacyregistry.MustRegister(metrics.SyncProxyRulesLastTimestamp) - legacyregistry.MustRegister(metrics.EndpointChangesPending) - legacyregistry.MustRegister(metrics.EndpointChangesTotal) - legacyregistry.MustRegister(metrics.ServiceChangesPending) - legacyregistry.MustRegister(metrics.ServiceChangesTotal) - legacyregistry.MustRegister(metrics.SyncProxyRulesLastQueuedTimestamp) - }) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go deleted file mode 100644 index 3c14451dbdba..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ /dev/null @@ -1,1711 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package winkernel - -import ( - "fmt" - "net" - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/Microsoft/hcsshim" - "github.com/Microsoft/hcsshim/hcn" - v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - apiutil "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/tools/events" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/apis/core/v1/helper" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/proxy/apis/config" - proxyconfig "k8s.io/kubernetes/pkg/proxy/config" - "k8s.io/kubernetes/pkg/proxy/healthcheck" - "k8s.io/kubernetes/pkg/proxy/metaproxier" - "k8s.io/kubernetes/pkg/proxy/metrics" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" - "k8s.io/kubernetes/pkg/util/async" - netutils "k8s.io/utils/net" -) - -// KernelCompatTester tests whether the required kernel capabilities are -// present to run the windows kernel proxier. -type KernelCompatTester interface { - IsCompatible() error -} - -// CanUseWinKernelProxier returns true if we should use the Kernel Proxier -// instead of the "classic" userspace Proxier. This is determined by checking -// the windows kernel version and for the existence of kernel features. -func CanUseWinKernelProxier(kcompat KernelCompatTester) (bool, error) { - // Check that the kernel supports what we need. - if err := kcompat.IsCompatible(); err != nil { - return false, err - } - return true, nil -} - -type WindowsKernelCompatTester struct{} - -// IsCompatible returns true if winkernel can support this mode of proxy -func (lkct WindowsKernelCompatTester) IsCompatible() error { - _, err := hcsshim.HNSListPolicyListRequest() - if err != nil { - return fmt.Errorf("Windows kernel is not compatible for Kernel mode") - } - return nil -} - -type externalIPInfo struct { - ip string - hnsID string -} - -type loadBalancerIngressInfo struct { - ip string - hnsID string - healthCheckHnsID string -} - -type loadBalancerInfo struct { - hnsID string -} - -type loadBalancerIdentifier struct { - protocol uint16 - internalPort uint16 - externalPort uint16 - vip string - endpointsHash [20]byte -} - -type loadBalancerFlags struct { - isILB bool - isDSR bool - isVipExternalIP bool - localRoutedVIP bool - useMUX bool - preserveDIP bool - sessionAffinity bool - isIPv6 bool -} - -// internal struct for string service information -type serviceInfo struct { - *proxy.BaseServicePortInfo - targetPort int - externalIPs []*externalIPInfo - loadBalancerIngressIPs []*loadBalancerIngressInfo - hnsID string - nodePorthnsID string - policyApplied bool - remoteEndpoint *endpointsInfo - hns HostNetworkService - preserveDIP bool - localTrafficDSR bool - internalTrafficLocal bool - winProxyOptimization bool -} - -type hnsNetworkInfo struct { - name string - id string - networkType string - remoteSubnets []*remoteSubnetInfo -} - -type remoteSubnetInfo struct { - destinationPrefix string - isolationID uint16 - providerAddress string - drMacAddress string -} - -const ( - NETWORK_TYPE_OVERLAY = "overlay" - // MAX_COUNT_STALE_LOADBALANCERS is the maximum number of stale loadbalancers which cleanedup in single syncproxyrules. - // If there are more stale loadbalancers to clean, it will go to next iteration of syncproxyrules. 
- MAX_COUNT_STALE_LOADBALANCERS = 20 -) - -func newHostNetworkService() (HostNetworkService, hcn.SupportedFeatures) { - var h HostNetworkService - supportedFeatures := hcn.GetSupportedFeatures() - if supportedFeatures.Api.V2 { - h = hns{} - } else { - panic("Windows HNS Api V2 required. This version of windows does not support API V2") - } - return h, supportedFeatures -} - -// logFormattedEndpoints will log all endpoints and its states which are taking part in endpointmap change. -// This mostly for debugging purpose and verbosity is set to 5. -func logFormattedEndpoints(logMsg string, logLevel klog.Level, svcPortName proxy.ServicePortName, eps []proxy.Endpoint) { - if klog.V(logLevel).Enabled() { - var epInfo string - for _, v := range eps { - epInfo = epInfo + fmt.Sprintf("\n %s={Ready:%v,Serving:%v,Terminating:%v,IsRemote:%v}", v.String(), v.IsReady(), v.IsServing(), v.IsTerminating(), !v.GetIsLocal()) - } - klog.V(logLevel).InfoS(logMsg, "svcPortName", svcPortName, "endpoints", epInfo) - } -} - -// This will cleanup stale load balancers which are pending delete -// in last iteration. This function will act like a self healing of stale -// loadbalancer entries. -func (proxier *Proxier) cleanupStaleLoadbalancers() { - i := 0 - countStaleLB := len(proxier.mapStaleLoadbalancers) - if countStaleLB == 0 { - return - } - klog.V(3).InfoS("Cleanup of stale loadbalancers triggered", "LB Count", countStaleLB) - for lbID := range proxier.mapStaleLoadbalancers { - i++ - if err := proxier.hns.deleteLoadBalancer(lbID); err == nil { - delete(proxier.mapStaleLoadbalancers, lbID) - } - if i == MAX_COUNT_STALE_LOADBALANCERS { - // The remaining stale loadbalancers will be cleaned up in next iteration - break - } - } - countStaleLB = len(proxier.mapStaleLoadbalancers) - if countStaleLB > 0 { - klog.V(3).InfoS("Stale loadbalancers still remaining", "LB Count", countStaleLB, "stale_lb_ids", proxier.mapStaleLoadbalancers) - } -} - -func getNetworkName(hnsNetworkName string) (string, error) { - if len(hnsNetworkName) == 0 { - klog.V(3).InfoS("Flag --network-name not set, checking environment variable") - hnsNetworkName = os.Getenv("KUBE_NETWORK") - if len(hnsNetworkName) == 0 { - return "", fmt.Errorf("Environment variable KUBE_NETWORK and network-flag not initialized") - } - } - return hnsNetworkName, nil -} - -func getNetworkInfo(hns HostNetworkService, hnsNetworkName string) (*hnsNetworkInfo, error) { - hnsNetworkInfo, err := hns.getNetworkByName(hnsNetworkName) - for err != nil { - klog.ErrorS(err, "Unable to find HNS Network specified, please check network name and CNI deployment", "hnsNetworkName", hnsNetworkName) - time.Sleep(1 * time.Second) - hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName) - } - return hnsNetworkInfo, err -} - -func isOverlay(hnsNetworkInfo *hnsNetworkInfo) bool { - return strings.EqualFold(hnsNetworkInfo.networkType, NETWORK_TYPE_OVERLAY) -} - -// StackCompatTester tests whether the required kernel and network are dualstack capable -type StackCompatTester interface { - DualStackCompatible(networkName string) bool -} - -type DualStackCompatTester struct{} - -func (t DualStackCompatTester) DualStackCompatible(networkName string) bool { - // First tag of hcsshim that has a proper check for dual stack support is v0.8.22 due to a bug. 
- if err := hcn.IPv6DualStackSupported(); err != nil { - // Hcn *can* fail the query to grab the version of hcn itself (which this call will do internally before parsing - // to see if dual stack is supported), but the only time this can happen, at least that can be discerned, is if the host - // is pre-1803 and hcn didn't exist. hcsshim should truthfully return a known error if this happened that we can - // check against, and the case where 'err != this known error' would be the 'this feature isn't supported' case, as is being - // used here. For now, seeming as how nothing before ws2019 (1809) is listed as supported for k8s we can pretty much assume - // any error here isn't because the query failed, it's just that dualstack simply isn't supported on the host. With all - // that in mind, just log as info and not error to let the user know we're falling back. - klog.InfoS("This version of Windows does not support dual-stack, falling back to single-stack", "err", err.Error()) - return false - } - - // check if network is using overlay - hns, _ := newHostNetworkService() - networkName, err := getNetworkName(networkName) - if err != nil { - klog.ErrorS(err, "Unable to determine dual-stack status, falling back to single-stack") - return false - } - networkInfo, err := getNetworkInfo(hns, networkName) - if err != nil { - klog.ErrorS(err, "Unable to determine dual-stack status, falling back to single-stack") - return false - } - - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinOverlay) && isOverlay(networkInfo) { - // Overlay (VXLAN) networks on Windows do not support dual-stack networking today - klog.InfoS("Winoverlay does not support dual-stack, falling back to single-stack") - return false - } - - return true -} - -// internal struct for endpoints information -type endpointsInfo struct { - ip string - port uint16 - isLocal bool - macAddress string - hnsID string - refCount *uint16 - providerAddress string - hns HostNetworkService - - // conditions - ready bool - serving bool - terminating bool -} - -// String is part of proxy.Endpoint interface. -func (info *endpointsInfo) String() string { - return net.JoinHostPort(info.ip, strconv.Itoa(int(info.port))) -} - -// GetIsLocal is part of proxy.Endpoint interface. -func (info *endpointsInfo) GetIsLocal() bool { - return info.isLocal -} - -// IsReady returns true if an endpoint is ready and not terminating. -func (info *endpointsInfo) IsReady() bool { - return info.ready -} - -// IsServing returns true if an endpoint is ready, regardless of it's terminating state. -func (info *endpointsInfo) IsServing() bool { - return info.serving -} - -// IsTerminating returns true if an endpoint is terminating. -func (info *endpointsInfo) IsTerminating() bool { - return info.terminating -} - -// GetZoneHint returns the zone hint for the endpoint. -func (info *endpointsInfo) GetZoneHints() sets.String { - return sets.String{} -} - -// IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface. -func (info *endpointsInfo) IP() string { - return info.ip -} - -// Port returns just the Port part of the endpoint. -func (info *endpointsInfo) Port() (int, error) { - return int(info.port), nil -} - -// Equal is part of proxy.Endpoint interface. -func (info *endpointsInfo) Equal(other proxy.Endpoint) bool { - return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal() -} - -// GetNodeName returns the NodeName for this endpoint. 
-func (info *endpointsInfo) GetNodeName() string { - return "" -} - -// GetZone returns the Zone for this endpoint. -func (info *endpointsInfo) GetZone() string { - return "" -} - -// Uses mac prefix and IPv4 address to return a mac address -// This ensures mac addresses are unique for proper load balancing -// There is a possibility of MAC collisions but this Mac address is used for remote endpoints only -// and not sent on the wire. -func conjureMac(macPrefix string, ip net.IP) string { - if ip4 := ip.To4(); ip4 != nil { - a, b, c, d := ip4[0], ip4[1], ip4[2], ip4[3] - return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d) - } else if ip6 := ip.To16(); ip6 != nil { - a, b, c, d := ip6[15], ip6[14], ip6[13], ip6[12] - return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d) - } - return "02-11-22-33-44-55" -} - -func (proxier *Proxier) endpointsMapChange(oldEndpointsMap, newEndpointsMap proxy.EndpointsMap) { - // This will optimize remote endpoint and loadbalancer deletion based on the annotation - var svcPortMap = make(map[proxy.ServicePortName]bool) - var logLevel klog.Level = 5 - for svcPortName, eps := range oldEndpointsMap { - logFormattedEndpoints("endpointsMapChange oldEndpointsMap", logLevel, svcPortName, eps) - svcPortMap[svcPortName] = true - proxier.onEndpointsMapChange(&svcPortName, false) - } - - for svcPortName, eps := range newEndpointsMap { - logFormattedEndpoints("endpointsMapChange newEndpointsMap", logLevel, svcPortName, eps) - // redundantCleanup true means cleanup is called second time on the same svcPort - redundantCleanup := svcPortMap[svcPortName] - proxier.onEndpointsMapChange(&svcPortName, redundantCleanup) - } -} - -func (proxier *Proxier) onEndpointsMapChange(svcPortName *proxy.ServicePortName, redundantCleanup bool) { - - svc, exists := proxier.svcPortMap[*svcPortName] - - if exists { - svcInfo, ok := svc.(*serviceInfo) - - if !ok { - klog.ErrorS(nil, "Failed to cast serviceInfo", "servicePortName", svcPortName) - return - } - - if svcInfo.winProxyOptimization && redundantCleanup { - // This is a second cleanup call. - // Second cleanup on the same svcPort will be ignored if the - // winProxyOptimization is Enabled - return - } - - klog.V(3).InfoS("Endpoints are modified. 
Service is stale", "servicePortName", svcPortName) - svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName], proxier.mapStaleLoadbalancers, true) - } else { - // If no service exists, just cleanup the remote endpoints - klog.V(3).InfoS("Endpoints are orphaned, cleaning up") - // Cleanup Endpoints references - epInfos, exists := proxier.endpointsMap[*svcPortName] - - if exists { - // Cleanup Endpoints references - for _, ep := range epInfos { - epInfo, ok := ep.(*endpointsInfo) - - if ok { - epInfo.Cleanup() - } - - } - } - } -} - -func (proxier *Proxier) serviceMapChange(previous, current proxy.ServicePortMap) { - for svcPortName := range current { - proxier.onServiceMapChange(&svcPortName) - } - - for svcPortName := range previous { - if _, ok := current[svcPortName]; ok { - continue - } - proxier.onServiceMapChange(&svcPortName) - } -} - -func (proxier *Proxier) onServiceMapChange(svcPortName *proxy.ServicePortName) { - - svc, exists := proxier.svcPortMap[*svcPortName] - - if exists { - svcInfo, ok := svc.(*serviceInfo) - - if !ok { - klog.ErrorS(nil, "Failed to cast serviceInfo", "servicePortName", svcPortName) - return - } - - klog.V(3).InfoS("Updating existing service port", "servicePortName", svcPortName, "clusterIP", svcInfo.ClusterIP(), "port", svcInfo.Port(), "protocol", svcInfo.Protocol()) - svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName], proxier.mapStaleLoadbalancers, false) - } -} - -// returns a new proxy.Endpoint which abstracts a endpointsInfo -func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo, _ *proxy.ServicePortName) proxy.Endpoint { - - portNumber, err := baseInfo.Port() - - if err != nil { - portNumber = 0 - } - - info := &endpointsInfo{ - ip: baseInfo.IP(), - port: uint16(portNumber), - isLocal: baseInfo.GetIsLocal(), - macAddress: conjureMac("02-11", netutils.ParseIPSloppy(baseInfo.IP())), - refCount: new(uint16), - hnsID: "", - hns: proxier.hns, - - ready: baseInfo.Ready, - serving: baseInfo.Serving, - terminating: baseInfo.Terminating, - } - - return info -} - -func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, providerAddress string) (*endpointsInfo, error) { - hnsEndpoint := &endpointsInfo{ - ip: ip, - isLocal: true, - macAddress: mac, - providerAddress: providerAddress, - - ready: true, - serving: true, - terminating: false, - } - ep, err := hns.createEndpoint(hnsEndpoint, network) - return ep, err -} - -func (ep *endpointsInfo) DecrementRefCount() { - klog.V(3).InfoS("Decrementing Endpoint RefCount", "endpointsInfo", ep) - if !ep.GetIsLocal() && ep.refCount != nil && *ep.refCount > 0 { - *ep.refCount-- - } -} - -func (ep *endpointsInfo) Cleanup() { - klog.V(3).InfoS("Endpoint cleanup", "endpointsInfo", ep) - if !ep.GetIsLocal() && ep.refCount != nil { - *ep.refCount-- - - // Remove the remote hns endpoint, if no service is referring it - // Never delete a Local Endpoint. Local Endpoints are already created by other entities. 
- // Remove only remote endpoints created by this service - if *ep.refCount <= 0 && !ep.GetIsLocal() { - klog.V(4).InfoS("Removing endpoints, since no one is referencing it", "endpoint", ep) - err := ep.hns.deleteEndpoint(ep.hnsID) - if err == nil { - ep.hnsID = "" - } else { - klog.ErrorS(err, "Endpoint deletion failed", "ip", ep.IP()) - } - } - - ep.refCount = nil - } -} - -func (refCountMap endPointsReferenceCountMap) getRefCount(hnsID string) *uint16 { - refCount, exists := refCountMap[hnsID] - if !exists { - refCountMap[hnsID] = new(uint16) - refCount = refCountMap[hnsID] - } - return refCount -} - -// returns a new proxy.ServicePort which abstracts a serviceInfo -func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *proxy.BaseServicePortInfo) proxy.ServicePort { - info := &serviceInfo{BaseServicePortInfo: bsvcPortInfo} - preserveDIP := service.Annotations["preserve-destination"] == "true" - // Annotation introduced to enable optimized loadbalancing - winProxyOptimization := !(strings.ToUpper(service.Annotations["winProxyOptimization"]) == "DISABLED") - localTrafficDSR := service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal - var internalTrafficLocal bool - if service.Spec.InternalTrafficPolicy != nil { - internalTrafficLocal = *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal - } - err := hcn.DSRSupported() - if err != nil { - preserveDIP = false - localTrafficDSR = false - } - // targetPort is zero if it is specified as a name in port.TargetPort. - // Its real value would be got later from endpoints. - targetPort := 0 - if port.TargetPort.Type == intstr.Int { - targetPort = port.TargetPort.IntValue() - } - - info.preserveDIP = preserveDIP - info.targetPort = targetPort - info.hns = proxier.hns - info.localTrafficDSR = localTrafficDSR - info.internalTrafficLocal = internalTrafficLocal - info.winProxyOptimization = winProxyOptimization - klog.V(3).InfoS("Flags enabled for service", "service", service.Name, "localTrafficDSR", localTrafficDSR, "internalTrafficLocal", internalTrafficLocal, "preserveDIP", preserveDIP, "winProxyOptimization", winProxyOptimization) - - for _, eip := range service.Spec.ExternalIPs { - info.externalIPs = append(info.externalIPs, &externalIPInfo{ip: eip}) - } - - for _, ingress := range service.Status.LoadBalancer.Ingress { - if netutils.ParseIPSloppy(ingress.IP) != nil { - info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP}) - } - } - return info -} - -func (network hnsNetworkInfo) findRemoteSubnetProviderAddress(ip string) string { - var providerAddress string - for _, rs := range network.remoteSubnets { - _, ipNet, err := netutils.ParseCIDRSloppy(rs.destinationPrefix) - if err != nil { - klog.ErrorS(err, "Failed to parse CIDR") - } - if ipNet.Contains(netutils.ParseIPSloppy(ip)) { - providerAddress = rs.providerAddress - } - if ip == rs.providerAddress { - providerAddress = rs.providerAddress - } - } - - return providerAddress -} - -type endPointsReferenceCountMap map[string]*uint16 - -// Proxier is an hns based proxy for connections between a localhost:lport -// and services that provide the actual backends. -type Proxier struct { - // TODO(imroc): implement node handler for winkernel proxier. - proxyconfig.NoopNodeHandler - - // endpointsChanges and serviceChanges contains all changes to endpoints and - // services that happened since policies were synced. For a single object, - // changes are accumulated, i.e. 
previous is state from before all of them, - // current is state after applying all of those. - endpointsChanges *proxy.EndpointChangeTracker - serviceChanges *proxy.ServiceChangeTracker - endPointsRefCount endPointsReferenceCountMap - mu sync.Mutex // protects the following fields - svcPortMap proxy.ServicePortMap - endpointsMap proxy.EndpointsMap - // endpointSlicesSynced and servicesSynced are set to true when corresponding - // objects are synced after startup. This is used to avoid updating hns policies - // with some partial data after kube-proxy restart. - endpointSlicesSynced bool - servicesSynced bool - isIPv6Mode bool - initialized int32 - syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules - // These are effectively const and do not need the mutex to be held. - masqueradeAll bool - masqueradeMark string - clusterCIDR string - hostname string - nodeIP net.IP - recorder events.EventRecorder - - serviceHealthServer healthcheck.ServiceHealthServer - healthzServer healthcheck.ProxierHealthUpdater - - // Since converting probabilities (floats) to strings is expensive - // and we are using only probabilities in the format of 1/n, we are - // precomputing some number of those and cache for future reuse. - precomputedProbabilities []string - - hns HostNetworkService - network hnsNetworkInfo - sourceVip string - hostMac string - isDSR bool - supportedFeatures hcn.SupportedFeatures - healthzPort int - - forwardHealthCheckVip bool - rootHnsEndpointName string - mapStaleLoadbalancers map[string]bool // This maintains entries of stale load balancers which are pending delete in last iteration -} - -type localPort struct { - desc string - ip string - port int - protocol string -} - -func (lp *localPort) String() string { - return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol) -} - -func Enum(p v1.Protocol) uint16 { - if p == v1.ProtocolTCP { - return 6 - } - if p == v1.ProtocolUDP { - return 17 - } - if p == v1.ProtocolSCTP { - return 132 - } - return 0 -} - -type closeable interface { - Close() error -} - -// Proxier implements proxy.Provider -var _ proxy.Provider = &Proxier{} - -// NewProxier returns a new Proxier -func NewProxier( - syncPeriod time.Duration, - minSyncPeriod time.Duration, - masqueradeAll bool, - masqueradeBit int, - clusterCIDR string, - hostname string, - nodeIP net.IP, - recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, - config config.KubeProxyWinkernelConfiguration, - healthzPort int, -) (*Proxier, error) { - masqueradeValue := 1 << uint(masqueradeBit) - masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) - - if nodeIP == nil { - klog.InfoS("Invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") - nodeIP = netutils.ParseIPSloppy("127.0.0.1") - } - - if len(clusterCIDR) == 0 { - klog.InfoS("ClusterCIDR not specified, unable to distinguish between internal and external traffic") - } - - // windows listens to all node addresses - nodePortAddresses := utilproxy.NewNodePortAddresses(nil) - serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) - - hns, supportedFeatures := newHostNetworkService() - hnsNetworkName, err := getNetworkName(config.NetworkName) - if err != nil { - return nil, err - } - - klog.V(3).InfoS("Cleaning up old HNS policy lists") - deleteAllHnsLoadBalancerPolicy() - - // Get HNS network information - hnsNetworkInfo, err := getNetworkInfo(hns, hnsNetworkName) - if err != nil { - return 
nil, err - } - - // Network could have been detected before Remote Subnet Routes are applied or ManagementIP is updated - // Sleep and update the network to include new information - if isOverlay(hnsNetworkInfo) { - time.Sleep(10 * time.Second) - hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName) - if err != nil { - return nil, fmt.Errorf("could not find HNS network %s", hnsNetworkName) - } - } - - klog.V(1).InfoS("Hns Network loaded", "hnsNetworkInfo", hnsNetworkInfo) - isDSR := config.EnableDSR - if isDSR && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinDSR) { - return nil, fmt.Errorf("WinDSR feature gate not enabled") - } - err = hcn.DSRSupported() - if isDSR && err != nil { - return nil, err - } - - var sourceVip string - var hostMac string - if isOverlay(hnsNetworkInfo) { - if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinOverlay) { - return nil, fmt.Errorf("WinOverlay feature gate not enabled") - } - err = hcn.RemoteSubnetSupported() - if err != nil { - return nil, err - } - sourceVip = config.SourceVip - if len(sourceVip) == 0 { - return nil, fmt.Errorf("source-vip flag not set") - } - - if nodeIP.IsUnspecified() { - // attempt to get the correct ip address - klog.V(2).InfoS("Node ip was unspecified, attempting to find node ip") - nodeIP, err = apiutil.ResolveBindAddress(nodeIP) - if err != nil { - klog.InfoS("Failed to find an ip. You may need set the --bind-address flag", "err", err) - } - } - - interfaces, _ := net.Interfaces() //TODO create interfaces - for _, inter := range interfaces { - addresses, _ := inter.Addrs() - for _, addr := range addresses { - addrIP, _, _ := netutils.ParseCIDRSloppy(addr.String()) - if addrIP.String() == nodeIP.String() { - klog.V(2).InfoS("Record Host MAC address", "addr", inter.HardwareAddr) - hostMac = inter.HardwareAddr.String() - } - } - } - if len(hostMac) == 0 { - return nil, fmt.Errorf("could not find host mac address for %s", nodeIP) - } - } - - isIPv6 := netutils.IsIPv6(nodeIP) - proxier := &Proxier{ - endPointsRefCount: make(endPointsReferenceCountMap), - svcPortMap: make(proxy.ServicePortMap), - endpointsMap: make(proxy.EndpointsMap), - masqueradeAll: masqueradeAll, - masqueradeMark: masqueradeMark, - clusterCIDR: clusterCIDR, - hostname: hostname, - nodeIP: nodeIP, - recorder: recorder, - serviceHealthServer: serviceHealthServer, - healthzServer: healthzServer, - hns: hns, - network: *hnsNetworkInfo, - sourceVip: sourceVip, - hostMac: hostMac, - isDSR: isDSR, - supportedFeatures: supportedFeatures, - isIPv6Mode: isIPv6, - healthzPort: healthzPort, - rootHnsEndpointName: config.RootHnsEndpointName, - forwardHealthCheckVip: config.ForwardHealthCheckVip, - mapStaleLoadbalancers: make(map[string]bool), - } - - ipFamily := v1.IPv4Protocol - if isIPv6 { - ipFamily = v1.IPv6Protocol - } - serviceChanges := proxy.NewServiceChangeTracker(proxier.newServiceInfo, ipFamily, recorder, proxier.serviceMapChange) - endPointChangeTracker := proxy.NewEndpointChangeTracker(hostname, proxier.newEndpointInfo, ipFamily, recorder, proxier.endpointsMapChange) - proxier.endpointsChanges = endPointChangeTracker - proxier.serviceChanges = serviceChanges - - burstSyncs := 2 - klog.V(3).InfoS("Record sync param", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs) - proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) - return proxier, nil -} - -func NewDualStackProxier( - syncPeriod time.Duration, - minSyncPeriod time.Duration, 
- masqueradeAll bool, - masqueradeBit int, - clusterCIDR string, - hostname string, - nodeIP [2]net.IP, - recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, - config config.KubeProxyWinkernelConfiguration, - healthzPort int, -) (proxy.Provider, error) { - - // Create an ipv4 instance of the single-stack proxier - ipv4Proxier, err := NewProxier(syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, - clusterCIDR, hostname, nodeIP[0], recorder, healthzServer, config, healthzPort) - - if err != nil { - return nil, fmt.Errorf("unable to create ipv4 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIP[0]) - } - - ipv6Proxier, err := NewProxier(syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, - clusterCIDR, hostname, nodeIP[1], recorder, healthzServer, config, healthzPort) - if err != nil { - return nil, fmt.Errorf("unable to create ipv6 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIP[1]) - } - - // Return a meta-proxier that dispatch calls between the two - // single-stack proxier instances - return metaproxier.NewMetaProxier(ipv4Proxier, ipv6Proxier), nil -} - -// CleanupLeftovers removes all hns rules created by the Proxier -// It returns true if an error was encountered. Errors are logged. -func CleanupLeftovers() (encounteredError bool) { - // Delete all Hns Load Balancer Policies - deleteAllHnsLoadBalancerPolicy() - // TODO - // Delete all Hns Remote endpoints - - return encounteredError -} - -func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []proxy.Endpoint, mapStaleLoadbalancers map[string]bool, isEndpointChange bool) { - klog.V(3).InfoS("Service cleanup", "serviceInfo", svcInfo) - // if it's an endpoint change and winProxyOptimization annotation enable, skip lb deletion and remoteEndpoint deletion - winProxyOptimization := isEndpointChange && svcInfo.winProxyOptimization - if winProxyOptimization { - klog.V(3).InfoS("Skipped loadbalancer deletion.", "hnsID", svcInfo.hnsID, "nodePorthnsID", svcInfo.nodePorthnsID, "winProxyOptimization", svcInfo.winProxyOptimization, "isEndpointChange", isEndpointChange) - } else { - // Skip the svcInfo.policyApplied check to remove all the policies - svcInfo.deleteLoadBalancerPolicy(mapStaleLoadbalancers) - } - // Cleanup Endpoints references - for _, ep := range endpoints { - epInfo, ok := ep.(*endpointsInfo) - if ok { - if winProxyOptimization { - epInfo.DecrementRefCount() - } else { - epInfo.Cleanup() - } - } - } - if svcInfo.remoteEndpoint != nil { - svcInfo.remoteEndpoint.Cleanup() - } - - svcInfo.policyApplied = false -} - -func (svcInfo *serviceInfo) deleteLoadBalancerPolicy(mapStaleLoadbalancer map[string]bool) { - // Remove the Hns Policy corresponding to this service - hns := svcInfo.hns - if err := hns.deleteLoadBalancer(svcInfo.hnsID); err != nil { - mapStaleLoadbalancer[svcInfo.hnsID] = true - klog.V(1).ErrorS(err, "Error deleting Hns loadbalancer policy resource.", "hnsID", svcInfo.hnsID, "ClusterIP", svcInfo.ClusterIP()) - } else { - // On successful delete, remove hnsId - svcInfo.hnsID = "" - } - - if err := hns.deleteLoadBalancer(svcInfo.nodePorthnsID); err != nil { - mapStaleLoadbalancer[svcInfo.nodePorthnsID] = true - klog.V(1).ErrorS(err, "Error deleting Hns NodePort policy resource.", "hnsID", svcInfo.nodePorthnsID, "NodePort", svcInfo.NodePort()) - } else { - // On successful delete, remove hnsId - svcInfo.nodePorthnsID = "" - } - - for _, externalIP := range svcInfo.externalIPs { - 
mapStaleLoadbalancer[externalIP.hnsID] = true - if err := hns.deleteLoadBalancer(externalIP.hnsID); err != nil { - klog.V(1).ErrorS(err, "Error deleting Hns ExternalIP policy resource.", "hnsID", externalIP.hnsID, "IP", externalIP.ip) - } else { - // On successful delete, remove hnsId - externalIP.hnsID = "" - } - } - for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs { - klog.V(3).InfoS("Loadbalancer Hns LoadBalancer delete triggered for loadBalancer Ingress resources in cleanup", "lbIngressIP", lbIngressIP) - if err := hns.deleteLoadBalancer(lbIngressIP.hnsID); err != nil { - mapStaleLoadbalancer[lbIngressIP.hnsID] = true - klog.V(1).ErrorS(err, "Error deleting Hns IngressIP policy resource.", "hnsID", lbIngressIP.hnsID, "IP", lbIngressIP.ip) - } else { - // On successful delete, remove hnsId - lbIngressIP.hnsID = "" - } - - if lbIngressIP.healthCheckHnsID != "" { - if err := hns.deleteLoadBalancer(lbIngressIP.healthCheckHnsID); err != nil { - mapStaleLoadbalancer[lbIngressIP.healthCheckHnsID] = true - klog.V(1).ErrorS(err, "Error deleting Hns IngressIP HealthCheck policy resource.", "hnsID", lbIngressIP.healthCheckHnsID, "IP", lbIngressIP.ip) - } else { - // On successful delete, remove hnsId - lbIngressIP.healthCheckHnsID = "" - } - } - } -} - -func deleteAllHnsLoadBalancerPolicy() { - plists, err := hcsshim.HNSListPolicyListRequest() - if err != nil { - return - } - for _, plist := range plists { - klog.V(3).InfoS("Remove policy", "policies", plist) - _, err = plist.Delete() - if err != nil { - klog.ErrorS(err, "Failed to delete policy list") - } - } - -} - -func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) { - hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) - if err != nil { - klog.ErrorS(err, "Failed to get HNS Network by name") - return nil, err - } - - return &hnsNetworkInfo{ - id: hnsnetwork.Id, - name: hnsnetwork.Name, - networkType: hnsnetwork.Type, - }, nil -} - -// Sync is called to synchronize the proxier state to hns as soon as possible. -func (proxier *Proxier) Sync() { - if proxier.healthzServer != nil { - proxier.healthzServer.QueuedUpdate() - } - metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() - proxier.syncRunner.Run() -} - -// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return. -func (proxier *Proxier) SyncLoop() { - // Update healthz timestamp at beginning in case Sync() never succeeds. - if proxier.healthzServer != nil { - proxier.healthzServer.Updated() - } - // synthesize "last change queued" time as the informers are syncing. - metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() - proxier.syncRunner.Loop(wait.NeverStop) -} - -func (proxier *Proxier) setInitialized(value bool) { - var initialized int32 - if value { - initialized = 1 - } - atomic.StoreInt32(&proxier.initialized, initialized) -} - -func (proxier *Proxier) isInitialized() bool { - return atomic.LoadInt32(&proxier.initialized) > 0 -} - -// OnServiceAdd is called whenever creation of new service object -// is observed. -func (proxier *Proxier) OnServiceAdd(service *v1.Service) { - proxier.OnServiceUpdate(nil, service) -} - -// OnServiceUpdate is called whenever modification of an existing -// service object is observed. 
-func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { - if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnServiceDelete is called whenever deletion of an existing service -// object is observed. -func (proxier *Proxier) OnServiceDelete(service *v1.Service) { - proxier.OnServiceUpdate(service, nil) -} - -// OnServiceSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnServiceSynced() { - proxier.mu.Lock() - proxier.servicesSynced = true - proxier.setInitialized(proxier.endpointSlicesSynced) - proxier.mu.Unlock() - - // Sync unconditionally - this is called once per lifetime. - proxier.syncProxyRules() -} - -func shouldSkipService(svcName types.NamespacedName, service *v1.Service) bool { - // if ClusterIP is "None" or empty, skip proxying - if !helper.IsServiceIPSet(service) { - klog.V(3).InfoS("Skipping service due to clusterIP", "serviceName", svcName, "clusterIP", service.Spec.ClusterIP) - return true - } - // Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied - if service.Spec.Type == v1.ServiceTypeExternalName { - klog.V(3).InfoS("Skipping service due to Type=ExternalName", "serviceName", svcName) - return true - } - return false -} - -// OnEndpointSliceAdd is called whenever creation of a new endpoint slice object -// is observed. -func (proxier *Proxier) OnEndpointSliceAdd(endpointSlice *discovery.EndpointSlice) { - if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnEndpointSliceUpdate is called whenever modification of an existing endpoint -// slice object is observed. -func (proxier *Proxier) OnEndpointSliceUpdate(_, endpointSlice *discovery.EndpointSlice) { - if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnEndpointSliceDelete is called whenever deletion of an existing endpoint slice -// object is observed. -func (proxier *Proxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointSlice) { - if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, true) && proxier.isInitialized() { - proxier.Sync() - } -} - -// OnEndpointSlicesSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnEndpointSlicesSynced() { - proxier.mu.Lock() - proxier.endpointSlicesSynced = true - proxier.setInitialized(proxier.servicesSynced) - proxier.mu.Unlock() - - // Sync unconditionally - this is called once per lifetime. - proxier.syncProxyRules() -} - -func (proxier *Proxier) cleanupAllPolicies() { - for svcName, svc := range proxier.svcPortMap { - svcInfo, ok := svc.(*serviceInfo) - if !ok { - klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName) - continue - } - svcInfo.cleanupAllPolicies(proxier.endpointsMap[svcName], proxier.mapStaleLoadbalancers, false) - } -} - -func isNetworkNotFoundError(err error) bool { - if err == nil { - return false - } - if _, ok := err.(hcn.NetworkNotFoundError); ok { - return true - } - if _, ok := err.(hcsshim.NetworkNotFoundError); ok { - return true - } - return false -} - -// isAllEndpointsTerminating function will return true if all the endpoints are terminating. 
-// If at least one is not terminating, it returns false.
-func (proxier *Proxier) isAllEndpointsTerminating(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool {
- for _, epInfo := range proxier.endpointsMap[svcName] {
- ep, ok := epInfo.(*endpointsInfo)
- if !ok {
- continue
- }
- if isLocalTrafficDSR && !ep.GetIsLocal() {
- // KEP-1669: Ignore remote endpoints when the ExternalTrafficPolicy is Local (DSR Mode)
- continue
- }
- // If the readiness probe fails and the pod is not under delete, then
- // the state of the endpoint will be Ready:False, Serving:False, Terminating:False
- if !ep.IsReady() && !ep.IsTerminating() {
- // Ready:false, Terminating:false, ignore
- continue
- }
- if !ep.IsTerminating() {
- return false
- }
- }
- return true
-}
-
-// isAllEndpointsNonServing returns true if all the endpoints are non-serving.
-// If at least one is serving, it returns false.
-func (proxier *Proxier) isAllEndpointsNonServing(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool {
- for _, epInfo := range proxier.endpointsMap[svcName] {
- ep, ok := epInfo.(*endpointsInfo)
- if !ok {
- continue
- }
- if isLocalTrafficDSR && !ep.GetIsLocal() {
- continue
- }
- if ep.IsServing() {
- return false
- }
- }
- return true
-}
-
-// updateQueriedEndpoints updates the queriedEndpoints map with newly created endpoint details
-func updateQueriedEndpoints(newHnsEndpoint *endpointsInfo, queriedEndpoints map[string]*endpointsInfo) {
- // store newly created endpoints in queriedEndpoints
- queriedEndpoints[newHnsEndpoint.hnsID] = newHnsEndpoint
- queriedEndpoints[newHnsEndpoint.ip] = newHnsEndpoint
-}
-
-// This is where all of the hns save/restore calls happen.
-// It acquires proxier.mu itself, so callers must not hold the lock.
-func (proxier *Proxier) syncProxyRules() {
- proxier.mu.Lock()
- defer proxier.mu.Unlock()
-
- // don't sync rules till we've received services and endpoints
- if !proxier.isInitialized() {
- klog.V(2).InfoS("Not syncing hns until Services and Endpoints have been received from master")
- return
- }
-
- // Keep track of how long syncs take.
- start := time.Now()
- defer func() {
- metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
- klog.V(4).InfoS("Syncing proxy rules complete", "elapsed", time.Since(start))
- }()
-
- hnsNetworkName := proxier.network.name
- hns := proxier.hns
-
- var gatewayHnsendpoint *endpointsInfo
- if proxier.forwardHealthCheckVip {
- gatewayHnsendpoint, _ = hns.getEndpointByName(proxier.rootHnsEndpointName)
- }
-
- prevNetworkID := proxier.network.id
- updatedNetwork, err := hns.getNetworkByName(hnsNetworkName)
- if updatedNetwork == nil || updatedNetwork.id != prevNetworkID || isNetworkNotFoundError(err) {
- klog.InfoS("The HNS network is not present or has changed since the last sync, please check the CNI deployment", "hnsNetworkName", hnsNetworkName)
- proxier.cleanupAllPolicies()
- if updatedNetwork != nil {
- proxier.network = *updatedNetwork
- }
- return
- }
-
- // We assume that if this was called, we really want to sync them,
- // even if nothing changed in the meantime. In other words, callers are
- // responsible for detecting no-op changes and not calling this function.
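- // Update applies the accumulated service and endpoint changes; the results identify
- // deleted and newly-active UDP services whose conntrack entries may be stale.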
- serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
- endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)
-
- deletedUDPClusterIPs := serviceUpdateResult.DeletedUDPClusterIPs
- // merge stale services gathered from updateEndpointsMap
- for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices {
- if svcInfo, ok := proxier.svcPortMap[svcPortName]; ok && svcInfo != nil && svcInfo.Protocol() == v1.ProtocolUDP {
- klog.V(2).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName)
- deletedUDPClusterIPs.Insert(svcInfo.ClusterIP().String())
- }
- }
- // Query HNS for endpoints and load balancers
- queriedEndpoints, err := hns.getAllEndpointsByNetwork(hnsNetworkName)
- if err != nil {
- klog.ErrorS(err, "Querying HNS for endpoints failed")
- return
- }
- if queriedEndpoints == nil {
- klog.V(4).InfoS("No existing endpoints found in HNS")
- queriedEndpoints = make(map[string]*(endpointsInfo))
- }
- queriedLoadBalancers, err := hns.getAllLoadBalancers()
- if queriedLoadBalancers == nil {
- klog.V(4).InfoS("No existing load balancers found in HNS")
- queriedLoadBalancers = make(map[loadBalancerIdentifier]*(loadBalancerInfo))
- }
- if err != nil {
- klog.ErrorS(err, "Querying HNS for load balancers failed")
- return
- }
- if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) {
- if _, ok := queriedEndpoints[proxier.sourceVip]; !ok {
- _, err = newSourceVIP(hns, hnsNetworkName, proxier.sourceVip, proxier.hostMac, proxier.nodeIP.String())
- if err != nil {
- klog.ErrorS(err, "Source Vip endpoint creation failed")
- return
- }
- }
- }
-
- klog.V(3).InfoS("Syncing Policies")
-
- // Program HNS by adding corresponding policies for each service.
- for svcName, svc := range proxier.svcPortMap {
- svcInfo, ok := svc.(*serviceInfo)
- if !ok {
- klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName)
- continue
- }
-
- if svcInfo.policyApplied {
- klog.V(4).InfoS("Policy already applied", "serviceInfo", svcInfo)
- continue
- }
-
- if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) {
- serviceVipEndpoint := queriedEndpoints[svcInfo.ClusterIP().String()]
- if serviceVipEndpoint == nil {
- klog.V(4).InfoS("No existing remote endpoint", "IP", svcInfo.ClusterIP())
- hnsEndpoint := &endpointsInfo{
- ip: svcInfo.ClusterIP().String(),
- isLocal: false,
- macAddress: proxier.hostMac,
- providerAddress: proxier.nodeIP.String(),
- }
-
- newHnsEndpoint, err := hns.createEndpoint(hnsEndpoint, hnsNetworkName)
- if err != nil {
- klog.ErrorS(err, "Remote endpoint creation failed for service VIP")
- continue
- }
-
- newHnsEndpoint.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID)
- *newHnsEndpoint.refCount++
- svcInfo.remoteEndpoint = newHnsEndpoint
- updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints)
- }
- }
-
- var hnsEndpoints []endpointsInfo
- var hnsLocalEndpoints []endpointsInfo
- klog.V(4).InfoS("Applying Policy", "serviceInfo", svcName)
- // Create remote endpoints for every endpoint corresponding to the service
- containsPublicIP := false
- containsNodeIP := false
- var allEndpointsTerminating, allEndpointsNonServing bool
- someEndpointsServing := true
-
- if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ProxyTerminatingEndpoints) && len(svcInfo.loadBalancerIngressIPs) > 0 {
- // This check is done only when the ProxyTerminatingEndpoints feature gate is enabled
- // and the service has load balancer ingress IPs (i.e. Spec.Type == LoadBalancer).
- allEndpointsTerminating = proxier.isAllEndpointsTerminating(svcName, svcInfo.localTrafficDSR)
- allEndpointsNonServing = proxier.isAllEndpointsNonServing(svcName, svcInfo.localTrafficDSR)
- someEndpointsServing = !allEndpointsNonServing
- klog.V(4).InfoS("Terminating status checked for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "allEndpointsTerminating", allEndpointsTerminating, "allEndpointsNonServing", allEndpointsNonServing, "localTrafficDSR", svcInfo.localTrafficDSR)
- } else {
- klog.V(4).InfoS("Skipped terminating status check for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "proxyEndpointsFeatureGateEnabled", utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ProxyTerminatingEndpoints), "ingressLBCount", len(svcInfo.loadBalancerIngressIPs))
- }
-
- for _, epInfo := range proxier.endpointsMap[svcName] {
- ep, ok := epInfo.(*endpointsInfo)
- if !ok {
- klog.ErrorS(nil, "Failed to cast endpointsInfo", "serviceName", svcName)
- continue
- }
-
- if svcInfo.internalTrafficLocal && svcInfo.localTrafficDSR && !ep.GetIsLocal() {
- // No need to use or create a remote endpoint when both the internal and external traffic policies are local
- klog.V(3).InfoS("Skipping the endpoint. Both internal and external traffic policies are local", "EpIP", ep.ip, "EpPort", ep.port)
- continue
- }
-
- if someEndpointsServing {
-
- if !allEndpointsTerminating && !ep.IsReady() {
- klog.V(3).InfoS("Skipping the endpoint for LB creation. Endpoint is either not ready or not all endpoints are terminating", "EpIP", ep.ip, "EpPort", ep.port, "allEndpointsTerminating", allEndpointsTerminating, "IsEpReady", ep.IsReady())
- continue
- }
- if !ep.IsServing() {
- klog.V(3).InfoS("Skipping the endpoint for LB creation. Endpoint is not serving", "EpIP", ep.ip, "EpPort", ep.port, "IsEpServing", ep.IsServing())
- continue
- }
-
- }
-
- var newHnsEndpoint *endpointsInfo
- hnsNetworkName := proxier.network.name
- var err error
-
- // targetPort is zero if it is specified as a name in port.TargetPort, so the real port must be obtained from endpoints.
- // Note that hcsshim.AddLoadBalancer() doesn't support endpoints with different ports, so only the port from the first endpoint is used.
- // TODO(feiskyer): add support for different endpoint ports once hcsshim.AddLoadBalancer() supports that.
- if svcInfo.targetPort == 0 {
- svcInfo.targetPort = int(ep.port)
- }
- // There is a bug in Windows Server 2019 that can cause two endpoints to be created with the same IP address, so we need to check using the endpoint ID first.
- // TODO: Remove lookup by endpoint ID, and use the IP address only, so we don't need to maintain multiple keys for lookup.
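- // Lookup precedence below: first by HNS endpoint ID, then by IP address on the current host.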
- if len(ep.hnsID) > 0 { - newHnsEndpoint = queriedEndpoints[ep.hnsID] - } - - if newHnsEndpoint == nil { - // First check if an endpoint resource exists for this IP, on the current host - // A Local endpoint could exist here already - // A remote endpoint was already created and proxy was restarted - newHnsEndpoint = queriedEndpoints[ep.IP()] - } - - if newHnsEndpoint == nil { - if ep.GetIsLocal() { - klog.ErrorS(err, "Local endpoint not found: on network", "ip", ep.IP(), "hnsNetworkName", hnsNetworkName) - continue - } - - if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) { - klog.InfoS("Updating network to check for new remote subnet policies", "networkName", proxier.network.name) - networkName := proxier.network.name - updatedNetwork, err := hns.getNetworkByName(networkName) - if err != nil { - klog.ErrorS(err, "Unable to find HNS Network specified, please check network name and CNI deployment", "hnsNetworkName", hnsNetworkName) - proxier.cleanupAllPolicies() - return - } - proxier.network = *updatedNetwork - providerAddress := proxier.network.findRemoteSubnetProviderAddress(ep.IP()) - if len(providerAddress) == 0 { - klog.InfoS("Could not find provider address, assuming it is a public IP", "IP", ep.IP()) - providerAddress = proxier.nodeIP.String() - } - - hnsEndpoint := &endpointsInfo{ - ip: ep.ip, - isLocal: false, - macAddress: conjureMac("02-11", netutils.ParseIPSloppy(ep.ip)), - providerAddress: providerAddress, - } - - newHnsEndpoint, err = hns.createEndpoint(hnsEndpoint, hnsNetworkName) - if err != nil { - klog.ErrorS(err, "Remote endpoint creation failed", "endpointsInfo", hnsEndpoint) - continue - } - updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints) - } else { - - hnsEndpoint := &endpointsInfo{ - ip: ep.ip, - isLocal: false, - macAddress: ep.macAddress, - } - - newHnsEndpoint, err = hns.createEndpoint(hnsEndpoint, hnsNetworkName) - if err != nil { - klog.ErrorS(err, "Remote endpoint creation failed") - continue - } - updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints) - } - } - // For Overlay networks 'SourceVIP' on an Load balancer Policy can either be chosen as - // a) Source VIP configured on kube-proxy (or) - // b) Node IP of the current node - // - // For L2Bridge network the Source VIP is always the NodeIP of the current node and the same - // would be configured on kube-proxy as SourceVIP - // - // The logic for choosing the SourceVIP in Overlay networks is based on the backend endpoints: - // a) Endpoints are any IP's outside the cluster ==> Choose NodeIP as the SourceVIP - // b) Endpoints are IP addresses of a remote node => Choose NodeIP as the SourceVIP - // c) Everything else (Local POD's, Remote POD's, Node IP of current node) ==> Choose the configured SourceVIP - if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) && !ep.GetIsLocal() { - providerAddress := proxier.network.findRemoteSubnetProviderAddress(ep.IP()) - - isNodeIP := (ep.IP() == providerAddress) - isPublicIP := (len(providerAddress) == 0) - klog.InfoS("Endpoint on overlay network", "ip", ep.IP(), "hnsNetworkName", hnsNetworkName, "isNodeIP", isNodeIP, "isPublicIP", isPublicIP) - - containsNodeIP = containsNodeIP || isNodeIP - containsPublicIP = containsPublicIP || isPublicIP - } - - // Save the hnsId for reference - klog.V(1).InfoS("Hns endpoint resource", "endpointsInfo", newHnsEndpoint) - - hnsEndpoints = append(hnsEndpoints, *newHnsEndpoint) - if newHnsEndpoint.GetIsLocal() { - hnsLocalEndpoints = append(hnsLocalEndpoints, *newHnsEndpoint) - } 
else { - // We only share the refCounts for remote endpoints - ep.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID) - *ep.refCount++ - } - - ep.hnsID = newHnsEndpoint.hnsID - - klog.V(3).InfoS("Endpoint resource found", "endpointsInfo", ep) - } - - klog.V(3).InfoS("Associated endpoints for service", "endpointsInfo", hnsEndpoints, "serviceName", svcName) - - if len(svcInfo.hnsID) > 0 { - // This should not happen - klog.InfoS("Load Balancer already exists -- Debug ", "hnsID", svcInfo.hnsID) - } - - // In ETP:Cluster, if all endpoints are under termination, - // it will have serving and terminating, else only ready and serving - if len(hnsEndpoints) == 0 { - if svcInfo.winProxyOptimization { - // Deleting loadbalancers when there are no endpoints to serve. - klog.V(3).InfoS("Cleanup existing ", "endpointsInfo", hnsEndpoints, "serviceName", svcName) - svcInfo.deleteLoadBalancerPolicy(proxier.mapStaleLoadbalancers) - } - klog.ErrorS(nil, "Endpoint information not available for service, not applying any policy", "serviceName", svcName) - continue - } - - klog.V(4).InfoS("Trying to apply Policies for service", "serviceInfo", svcInfo) - var hnsLoadBalancer *loadBalancerInfo - var sourceVip = proxier.sourceVip - if containsPublicIP || containsNodeIP { - sourceVip = proxier.nodeIP.String() - } - - sessionAffinityClientIP := svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP - if sessionAffinityClientIP && !proxier.supportedFeatures.SessionAffinity { - klog.InfoS("Session Affinity is not supported on this version of Windows") - } - - endpointsAvailableForLB := !allEndpointsTerminating && !allEndpointsNonServing - proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &svcInfo.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), hnsEndpoints, queriedLoadBalancers) - - // clusterIPEndpoints is the endpoint list used for creating ClusterIP loadbalancer. - clusterIPEndpoints := hnsEndpoints - if svcInfo.internalTrafficLocal { - // Take local endpoints for clusterip loadbalancer when internal traffic policy is local. - clusterIPEndpoints = hnsLocalEndpoints - } - - if len(clusterIPEndpoints) > 0 { - - // If all endpoints are terminating, then no need to create Cluster IP LoadBalancer - // Cluster IP LoadBalancer creation - hnsLoadBalancer, err := hns.getLoadBalancer( - clusterIPEndpoints, - loadBalancerFlags{isDSR: proxier.isDSR, isIPv6: proxier.isIPv6Mode, sessionAffinity: sessionAffinityClientIP}, - sourceVip, - svcInfo.ClusterIP().String(), - Enum(svcInfo.Protocol()), - uint16(svcInfo.targetPort), - uint16(svcInfo.Port()), - queriedLoadBalancers, - ) - if err != nil { - klog.ErrorS(err, "Policy creation failed") - continue - } - - svcInfo.hnsID = hnsLoadBalancer.hnsID - klog.V(3).InfoS("Hns LoadBalancer resource created for cluster ip resources", "clusterIP", svcInfo.ClusterIP(), "hnsID", hnsLoadBalancer.hnsID) - - } else { - klog.V(3).InfoS("Skipped creating Hns LoadBalancer for cluster ip resources. Reason : all endpoints are terminating", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "allEndpointsTerminating", allEndpointsTerminating) - } - - // If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints - if svcInfo.NodePort() > 0 { - // If the preserve-destination service annotation is present, we will disable routing mesh for NodePort. - // This means that health services can use Node Port without falsely getting results from a different node. 
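- // Hence, when preserve-DIP or local-DSR is set, the NodePort load balancer below is restricted to endpoints on this node.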
- nodePortEndpoints := hnsEndpoints - if svcInfo.preserveDIP || svcInfo.localTrafficDSR { - nodePortEndpoints = hnsLocalEndpoints - } - - proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &svcInfo.nodePorthnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), nodePortEndpoints, queriedLoadBalancers) - - if len(nodePortEndpoints) > 0 && endpointsAvailableForLB { - // If all endpoints are in terminating stage, then no need to create Node Port LoadBalancer - hnsLoadBalancer, err := hns.getLoadBalancer( - nodePortEndpoints, - loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, localRoutedVIP: true, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, - sourceVip, - "", - Enum(svcInfo.Protocol()), - uint16(svcInfo.targetPort), - uint16(svcInfo.NodePort()), - queriedLoadBalancers, - ) - if err != nil { - klog.ErrorS(err, "Policy creation failed") - continue - } - - svcInfo.nodePorthnsID = hnsLoadBalancer.hnsID - klog.V(3).InfoS("Hns LoadBalancer resource created for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "hnsID", hnsLoadBalancer.hnsID) - } else { - klog.V(3).InfoS("Skipped creating Hns LoadBalancer for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "allEndpointsTerminating", allEndpointsTerminating) - } - } - - // Create a Load Balancer Policy for each external IP - for _, externalIP := range svcInfo.externalIPs { - // Disable routing mesh if ExternalTrafficPolicy is set to local - externalIPEndpoints := hnsEndpoints - if svcInfo.localTrafficDSR { - externalIPEndpoints = hnsLocalEndpoints - } - - proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &externalIP.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), externalIPEndpoints, queriedLoadBalancers) - - if len(externalIPEndpoints) > 0 && endpointsAvailableForLB { - // If all endpoints are in terminating stage, then no need to External IP LoadBalancer - // Try loading existing policies, if already available - hnsLoadBalancer, err = hns.getLoadBalancer( - externalIPEndpoints, - loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, - sourceVip, - externalIP.ip, - Enum(svcInfo.Protocol()), - uint16(svcInfo.targetPort), - uint16(svcInfo.Port()), - queriedLoadBalancers, - ) - if err != nil { - klog.ErrorS(err, "Policy creation failed") - continue - } - externalIP.hnsID = hnsLoadBalancer.hnsID - klog.V(3).InfoS("Hns LoadBalancer resource created for externalIP resources", "externalIP", externalIP, "hnsID", hnsLoadBalancer.hnsID) - } else { - klog.V(3).InfoS("Skipped creating Hns LoadBalancer for externalIP resources", "externalIP", externalIP, "allEndpointsTerminating", allEndpointsTerminating) - } - } - // Create a Load Balancer Policy for each loadbalancer ingress - for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs { - // Try loading existing policies, if already available - lbIngressEndpoints := hnsEndpoints - if svcInfo.preserveDIP || svcInfo.localTrafficDSR { - lbIngressEndpoints = hnsLocalEndpoints - } - - proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), lbIngressEndpoints, queriedLoadBalancers) - - if len(lbIngressEndpoints) > 0 { - hnsLoadBalancer, err := 
hns.getLoadBalancer( - lbIngressEndpoints, - loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, - sourceVip, - lbIngressIP.ip, - Enum(svcInfo.Protocol()), - uint16(svcInfo.targetPort), - uint16(svcInfo.Port()), - queriedLoadBalancers, - ) - if err != nil { - klog.ErrorS(err, "Policy creation failed") - continue - } - lbIngressIP.hnsID = hnsLoadBalancer.hnsID - klog.V(3).InfoS("Hns LoadBalancer resource created for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP) - } else { - klog.V(3).InfoS("Skipped creating Hns LoadBalancer for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP) - } - - if proxier.forwardHealthCheckVip && gatewayHnsendpoint != nil && endpointsAvailableForLB { - // Avoid creating health check loadbalancer if all the endpoints are terminating - nodeport := proxier.healthzPort - if svcInfo.HealthCheckNodePort() != 0 { - nodeport = svcInfo.HealthCheckNodePort() - } - - proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.healthCheckHnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), []endpointsInfo{*gatewayHnsendpoint}, queriedLoadBalancers) - - hnsHealthCheckLoadBalancer, err := hns.getLoadBalancer( - []endpointsInfo{*gatewayHnsendpoint}, - loadBalancerFlags{isDSR: false, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP}, - sourceVip, - lbIngressIP.ip, - Enum(svcInfo.Protocol()), - uint16(nodeport), - uint16(nodeport), - queriedLoadBalancers, - ) - if err != nil { - klog.ErrorS(err, "Policy creation failed") - continue - } - lbIngressIP.healthCheckHnsID = hnsHealthCheckLoadBalancer.hnsID - klog.V(3).InfoS("Hns Health Check LoadBalancer resource created for loadBalancer Ingress resources", "ip", lbIngressIP) - } else { - klog.V(3).InfoS("Skipped creating Hns Health Check LoadBalancer for loadBalancer Ingress resources", "ip", lbIngressIP, "allEndpointsTerminating", allEndpointsTerminating) - } - } - svcInfo.policyApplied = true - klog.V(2).InfoS("Policy successfully applied for service", "serviceInfo", svcInfo) - } - - if proxier.healthzServer != nil { - proxier.healthzServer.Updated() - } - metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() - - // Update service healthchecks. The endpoints list might include services that are - // not "OnlyLocal", but the services list will not, and the serviceHealthServer - // will just drop those endpoints. - if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil { - klog.ErrorS(err, "Error syncing healthcheck services") - } - if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil { - klog.ErrorS(err, "Error syncing healthcheck endpoints") - } - - // Finish housekeeping. - // TODO: these could be made more consistent. 
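- // Housekeeping: log stale UDP service IPs pending delete, drop remote endpoints whose
- // refcount reached zero, and retry load balancers marked stale in earlier iterations.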
- for _, svcIP := range deletedUDPClusterIPs.UnsortedList() {
- // TODO: Check whether cleaning up stale service connections is required here
- klog.V(5).InfoS("Pending delete stale service IP connections", "IP", svcIP)
- }
-
- // remove stale endpoint refcount entries
- for hnsID, referenceCount := range proxier.endPointsRefCount {
- if *referenceCount <= 0 {
- klog.V(3).InfoS("Deleting unreferenced remote endpoint", "hnsID", hnsID)
- proxier.hns.deleteEndpoint(hnsID)
- delete(proxier.endPointsRefCount, hnsID)
- }
- }
- // This will clean up stale load balancers that were left pending delete
- // in the last iteration
- proxier.cleanupStaleLoadbalancers()
-}
-
-// deleteExistingLoadBalancer checks whether a loadbalancer delete is needed.
-// If it is needed, the function deletes the existing loadbalancer and returns true, else false.
-func (proxier *Proxier) deleteExistingLoadBalancer(hns HostNetworkService, winProxyOptimization bool, lbHnsID *string, sourceVip string, protocol, intPort, extPort uint16, endpoints []endpointsInfo, queriedLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) bool {
-
- if !winProxyOptimization || *lbHnsID == "" {
- // Loadbalancer delete not needed
- return false
- }
-
- lbID, lbIdErr := findLoadBalancerID(
- endpoints,
- sourceVip,
- protocol,
- intPort,
- extPort,
- )
-
- if lbIdErr != nil {
- return proxier.deleteLoadBalancer(hns, lbHnsID)
- }
-
- if _, ok := queriedLoadBalancers[lbID]; ok {
- // The existing loadbalancer in the system is the same as the one we would delete and recreate, so skip deleting it.
- return false
- }
-
- return proxier.deleteLoadBalancer(hns, lbHnsID)
-}
-
-func (proxier *Proxier) deleteLoadBalancer(hns HostNetworkService, lbHnsID *string) bool {
- klog.V(3).InfoS("Hns LoadBalancer delete triggered for loadBalancer resources", "lbHnsID", *lbHnsID)
- if err := hns.deleteLoadBalancer(*lbHnsID); err != nil {
- // This will be cleaned up by the cleanupStaleLoadbalancers function.
- proxier.mapStaleLoadbalancers[*lbHnsID] = true
- }
- *lbHnsID = ""
- return true
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go
index 41f966858b7b..1328425ffcfb 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go
@@ -23,11 +23,7 @@ type Interface interface {
 AllocateNext() (int, bool, error)
 Release(int) error
 ForEach(func(int))
-
- // For testing
 Has(int) bool
-
- // For testing
 Free() int

 // Destroy shuts down all internal structures.
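Aside from the mechanical vendor churn, one pattern threaded through the removed winkernel proxier code above is worth calling out: load balancer deletions that fail are remembered in mapStaleLoadbalancers and retried on later syncs, capped at MAX_COUNT_STALE_LOADBALANCERS per iteration. The following standalone Go sketch restates that bookkeeping under simplified, hypothetical names (deleteFn stands in for the HNS call; this is not part of this diff):

package main

import "fmt"

const maxPerSync = 20

// cleanupStale retries deletion of previously failed load balancers, removing
// entries from the set only on success, and processing at most maxPerSync per
// sync loop so one bad batch cannot stall the whole iteration.
func cleanupStale(stale map[string]bool, deleteFn func(string) error) {
	i := 0
	for id := range stale {
		i++
		if err := deleteFn(id); err == nil {
			delete(stale, id) // success: no longer stale
		}
		if i == maxPerSync {
			break // the rest will be retried on the next sync
		}
	}
}

func main() {
	stale := map[string]bool{"lb-1": true, "lb-2": true}
	attempts := 0
	cleanupStale(stale, func(id string) error {
		attempts++
		return nil // pretend the HNS deletion succeeds
	})
	fmt.Println(len(stale), attempts) // 0 2
}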
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go index 375b49b569ac..9121eff657c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go @@ -22,7 +22,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" config "k8s.io/kubernetes/pkg/scheduler/apis/config" configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" - configv1beta2 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2" configv1beta3 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3" ) @@ -41,12 +40,10 @@ func init() { // AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api. func AddToScheme(scheme *runtime.Scheme) { utilruntime.Must(config.AddToScheme(scheme)) - utilruntime.Must(configv1beta2.AddToScheme(scheme)) utilruntime.Must(configv1beta3.AddToScheme(scheme)) utilruntime.Must(configv1.AddToScheme(scheme)) utilruntime.Must(scheme.SetVersionPriority( configv1.SchemeGroupVersion, configv1beta3.SchemeGroupVersion, - configv1beta2.SchemeGroupVersion, )) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go index 8db4e35c987e..a87a8d1d6b46 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go @@ -96,6 +96,12 @@ type KubeSchedulerConfiguration struct { // Extenders are the list of scheduler extenders, each holding the values of how to communicate // with the extender. These extenders are shared by all scheduler profiles. Extenders []Extender + + // DelayCacheUntilActive specifies when to start caching. If this is true and leader election is enabled, + // the scheduler will wait to fill informer caches until it is the leader. Doing so will have slower + // failover with the benefit of lower memory overhead while waiting to become leader. + // Defaults to false. + DelayCacheUntilActive bool } // KubeSchedulerProfile is a scheduling profile. @@ -247,13 +253,13 @@ func (p *Plugins) Names() []string { p.Permit, p.QueueSort, } - n := sets.NewString() + n := sets.New[string]() for _, e := range extensions { for _, pg := range e.Enabled { n.Insert(pg.Name) } } - return n.List() + return sets.List(n) } // Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go index bdb8f78bf502..a7d5a602619d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go @@ -110,7 +110,7 @@ type pluginIndex struct { } func mergePluginSet(logger klog.Logger, defaultPluginSet, customPluginSet v1.PluginSet) v1.PluginSet { - disabledPlugins := sets.NewString() + disabledPlugins := sets.New[string]() enabledCustomPlugins := make(map[string]pluginIndex) // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. 
replacedPluginIndex := sets.NewInt() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go index e6ec0ab613b1..6746f23a9620 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go @@ -57,13 +57,13 @@ func pluginsNames(p *configv1.Plugins) []string { p.PreEnqueue, p.QueueSort, } - n := sets.NewString() + n := sets.New[string]() for _, e := range extensions { for _, pg := range e.Enabled { n.Insert(pg.Name) } } - return n.List() + return sets.List(n) } func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSchedulerProfile) { @@ -71,7 +71,7 @@ func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSch prof.Plugins = mergePlugins(logger, getDefaultPlugins(), prof.Plugins) // Set default plugin configs. scheme := GetPluginArgConversionScheme() - existingConfigs := sets.NewString() + existingConfigs := sets.New[string]() for j := range prof.PluginConfig { existingConfigs.Insert(prof.PluginConfig[j].Name) args := prof.PluginConfig[j].Args.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go index dd8d9b231094..ca9be957b426 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go @@ -429,6 +429,7 @@ func autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfigurat out.Profiles = nil } out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders)) + out.DelayCacheUntilActive = in.DelayCacheUntilActive return nil } @@ -466,6 +467,7 @@ func autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfigurat out.Profiles = nil } out.Extenders = *(*[]v1.Extender)(unsafe.Pointer(&in.Extenders)) + out.DelayCacheUntilActive = in.DelayCacheUntilActive return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/conversion.go deleted file mode 100644 index c0d89d75ee91..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/conversion.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta2 - -import ( - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kube-scheduler/config/v1beta2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -var ( - // pluginArgConversionScheme is a scheme with internal and v1beta2 registered, - // used for defaulting/converting typed PluginConfig Args. - // Access via getPluginArgConversionScheme() - pluginArgConversionScheme *runtime.Scheme - initPluginArgConversionScheme sync.Once -) - -func GetPluginArgConversionScheme() *runtime.Scheme { - initPluginArgConversionScheme.Do(func() { - // set up the scheme used for plugin arg conversion - pluginArgConversionScheme = runtime.NewScheme() - utilruntime.Must(AddToScheme(pluginArgConversionScheme)) - utilruntime.Must(config.AddToScheme(pluginArgConversionScheme)) - }) - return pluginArgConversionScheme -} - -func Convert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1beta2.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToInternalPluginConfigArgs(out) -} - -// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal -// types using a scheme, after applying defaults. -func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - prof := &out.Profiles[i] - for j := range prof.PluginConfig { - args := prof.PluginConfig[j].Args - if args == nil { - continue - } - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion) - if err != nil { - return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err) - } - prof.PluginConfig[j].Args = internalArgs - } - } - return nil -} - -func Convert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1beta2.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToExternalPluginConfigArgs(out) -} - -// convertToExternalPluginConfigArgs converts PluginConfig#Args into -// external (versioned) types using a scheme. -func convertToExternalPluginConfigArgs(out *v1beta2.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - for j := range out.Profiles[i].PluginConfig { - args := out.Profiles[i].PluginConfig[j].Args - if args.Object == nil { - continue - } - if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown { - continue - } - externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion) - if err != nil { - return err - } - out.Profiles[i].PluginConfig[j].Args.Object = externalArgs - } - } - return nil -} - -// Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile uses auto coversion by -// ignoring per profile PercentageOfNodesToScore. 
-func Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta2.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in, out, s) -} - -func Convert_config_Plugins_To_v1beta2_Plugins(in *config.Plugins, out *v1beta2.Plugins, s conversion.Scope) error { - return autoConvert_config_Plugins_To_v1beta2_Plugins(in, out, s) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go deleted file mode 100644 index 6bd76b704aeb..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - "k8s.io/kube-scheduler/config/v1beta2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" -) - -// getDefaultPlugins returns the default set of plugins. 
-func getDefaultPlugins() *v1beta2.Plugins { - plugins := &v1beta2.Plugins{ - QueueSort: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.PrioritySort}, - }, - }, - PreFilter: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.NodeResourcesFit}, - {Name: names.NodePorts}, - {Name: names.VolumeRestrictions}, - {Name: names.PodTopologySpread}, - {Name: names.InterPodAffinity}, - {Name: names.VolumeBinding}, - {Name: names.VolumeZone}, - {Name: names.NodeAffinity}, - }, - }, - Filter: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.NodeUnschedulable}, - {Name: names.NodeName}, - {Name: names.TaintToleration}, - {Name: names.NodeAffinity}, - {Name: names.NodePorts}, - {Name: names.NodeResourcesFit}, - {Name: names.VolumeRestrictions}, - {Name: names.EBSLimits}, - {Name: names.GCEPDLimits}, - {Name: names.NodeVolumeLimits}, - {Name: names.AzureDiskLimits}, - {Name: names.VolumeBinding}, - {Name: names.VolumeZone}, - {Name: names.PodTopologySpread}, - {Name: names.InterPodAffinity}, - }, - }, - PostFilter: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.DefaultPreemption}, - }, - }, - PreScore: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.InterPodAffinity}, - {Name: names.PodTopologySpread}, - {Name: names.TaintToleration}, - {Name: names.NodeAffinity}, - {Name: names.NodeResourcesFit}, - {Name: names.NodeResourcesBalancedAllocation}, - }, - }, - Score: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(1)}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(1)}, - // Weight is doubled because: - // - This is a score coming from user preference. - // - It makes its signal comparable to NodeResourcesFit.LeastAllocated. - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.TaintToleration, Weight: pointer.Int32(1)}, - }, - }, - Reserve: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.VolumeBinding}, - }, - }, - PreBind: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.VolumeBinding}, - }, - }, - Bind: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.DefaultBinder}, - }, - }, - } - applyFeatureGates(plugins) - - return plugins -} - -func applyFeatureGates(config *v1beta2.Plugins) { - if utilfeature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { - config.Score.Enabled = append(config.Score.Enabled, v1beta2.Plugin{Name: names.VolumeBinding, Weight: pointer.Int32(1)}) - } - if utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness) { - config.PreEnqueue.Enabled = append(config.PreEnqueue.Enabled, v1beta2.Plugin{Name: names.SchedulingGates}) - } -} - -// mergePlugins merges the custom set into the given default one, handling disabled sets. 
-func mergePlugins(defaultPlugins, customPlugins *v1beta2.Plugins) *v1beta2.Plugins { - if customPlugins == nil { - return defaultPlugins - } - - defaultPlugins.QueueSort = mergePluginSet(defaultPlugins.QueueSort, customPlugins.QueueSort) - defaultPlugins.PreFilter = mergePluginSet(defaultPlugins.PreFilter, customPlugins.PreFilter) - defaultPlugins.Filter = mergePluginSet(defaultPlugins.Filter, customPlugins.Filter) - defaultPlugins.PostFilter = mergePluginSet(defaultPlugins.PostFilter, customPlugins.PostFilter) - defaultPlugins.PreScore = mergePluginSet(defaultPlugins.PreScore, customPlugins.PreScore) - defaultPlugins.Score = mergePluginSet(defaultPlugins.Score, customPlugins.Score) - defaultPlugins.Reserve = mergePluginSet(defaultPlugins.Reserve, customPlugins.Reserve) - defaultPlugins.Permit = mergePluginSet(defaultPlugins.Permit, customPlugins.Permit) - defaultPlugins.PreBind = mergePluginSet(defaultPlugins.PreBind, customPlugins.PreBind) - defaultPlugins.Bind = mergePluginSet(defaultPlugins.Bind, customPlugins.Bind) - defaultPlugins.PostBind = mergePluginSet(defaultPlugins.PostBind, customPlugins.PostBind) - return defaultPlugins -} - -type pluginIndex struct { - index int - plugin v1beta2.Plugin -} - -func mergePluginSet(defaultPluginSet, customPluginSet v1beta2.PluginSet) v1beta2.PluginSet { - disabledPlugins := sets.NewString() - enabledCustomPlugins := make(map[string]pluginIndex) - // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. - replacedPluginIndex := sets.NewInt() - for _, disabledPlugin := range customPluginSet.Disabled { - disabledPlugins.Insert(disabledPlugin.Name) - } - for index, enabledPlugin := range customPluginSet.Enabled { - enabledCustomPlugins[enabledPlugin.Name] = pluginIndex{index, enabledPlugin} - } - var enabledPlugins []v1beta2.Plugin - if !disabledPlugins.Has("*") { - for _, defaultEnabledPlugin := range defaultPluginSet.Enabled { - if disabledPlugins.Has(defaultEnabledPlugin.Name) { - continue - } - // The default plugin is explicitly re-configured, update the default plugin accordingly. - if customPlugin, ok := enabledCustomPlugins[defaultEnabledPlugin.Name]; ok { - klog.InfoS("Default plugin is explicitly re-configured; overriding", "plugin", defaultEnabledPlugin.Name) - // Update the default plugin in place to preserve order. - defaultEnabledPlugin = customPlugin.plugin - replacedPluginIndex.Insert(customPlugin.index) - } - enabledPlugins = append(enabledPlugins, defaultEnabledPlugin) - } - } - - // Append all the custom plugins which haven't replaced any default plugins. - // Note: duplicated custom plugins will still be appended here. - // If so, the instantiation of scheduler framework will detect it and abort. - for index, plugin := range customPluginSet.Enabled { - if !replacedPluginIndex.Has(index) { - enabledPlugins = append(enabledPlugins, plugin) - } - } - return v1beta2.PluginSet{Enabled: enabledPlugins} -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/defaults.go deleted file mode 100644 index 1bf12080673d..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/defaults.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
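The deleted `mergePluginSet` above (its `v1` twin survives earlier in this diff) merges a profile's custom plugin set over the defaults: `Disabled` entries (or `"*"`) drop defaults, and a custom `Enabled` entry that names a default replaces it in place rather than being appended. A tiny standalone reimplementation of just that rule, for illustration only (`plugin`, `merge`, and `MyPlugin` are hypothetical, not the vendored code):

```go
package main

import "fmt"

type plugin struct{ Name string }

// merge sketches the mergePluginSet semantics: disabled names (or "*")
// drop defaults; a custom plugin sharing a default's name replaces it
// in place; remaining customs are appended at the end.
func merge(defaults, enabled, disabled []plugin) []plugin {
	off := map[string]bool{}
	for _, p := range disabled {
		off[p.Name] = true
	}
	custom := map[string]int{}
	for i, p := range enabled {
		custom[p.Name] = i
	}
	replaced := map[int]bool{}
	var out []plugin
	if !off["*"] {
		for _, d := range defaults {
			if off[d.Name] {
				continue
			}
			if i, ok := custom[d.Name]; ok { // re-configured default
				d = enabled[i]
				replaced[i] = true
			}
			out = append(out, d)
		}
	}
	for i, p := range enabled {
		if !replaced[i] {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	defaults := []plugin{{"PrioritySort"}, {"NodeResourcesFit"}}
	fmt.Println(merge(defaults,
		[]plugin{{"NodeResourcesFit"}, {"MyPlugin"}},
		[]plugin{{"PrioritySort"}}))
	// [{NodeResourcesFit} {MyPlugin}]
}
```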
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/util/feature" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "k8s.io/kube-scheduler/config/v1beta2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/utils/pointer" -) - -var defaultResourceSpec = []v1beta2.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, -} - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func pluginsNames(p *v1beta2.Plugins) []string { - if p == nil { - return nil - } - extensions := []v1beta2.PluginSet{ - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.QueueSort, - } - n := sets.NewString() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return n.List() -} - -func setDefaults_KubeSchedulerProfile(prof *v1beta2.KubeSchedulerProfile) { - // Set default plugins. - prof.Plugins = mergePlugins(getDefaultPlugins(), prof.Plugins) - - // Set default plugin configs. - scheme := GetPluginArgConversionScheme() - existingConfigs := sets.NewString() - for j := range prof.PluginConfig { - existingConfigs.Insert(prof.PluginConfig[j].Name) - args := prof.PluginConfig[j].Args.Object - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - scheme.Default(args) - } - - // Append default configs for plugins that didn't have one explicitly set. - for _, name := range pluginsNames(prof.Plugins) { - if existingConfigs.Has(name) { - continue - } - gvk := v1beta2.SchemeGroupVersion.WithKind(name + "Args") - args, err := scheme.New(gvk) - if err != nil { - // This plugin is out-of-tree or doesn't require configuration. - continue - } - scheme.Default(args) - args.GetObjectKind().SetGroupVersionKind(gvk) - prof.PluginConfig = append(prof.PluginConfig, v1beta2.PluginConfig{ - Name: name, - Args: runtime.RawExtension{Object: args}, - }) - } -} - -// SetDefaults_KubeSchedulerConfiguration sets additional defaults -func SetDefaults_KubeSchedulerConfiguration(obj *v1beta2.KubeSchedulerConfiguration) { - if obj.Parallelism == nil { - obj.Parallelism = pointer.Int32(16) - } - - if len(obj.Profiles) == 0 { - obj.Profiles = append(obj.Profiles, v1beta2.KubeSchedulerProfile{}) - } - // Only apply a default scheduler name when there is a single profile. - // Validation will ensure that every profile has a non-empty unique name. - if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil { - obj.Profiles[0].SchedulerName = pointer.String(v1.DefaultSchedulerName) - } - - // Add the default set of plugins and apply the configuration. 
- for i := range obj.Profiles { - prof := &obj.Profiles[i] - setDefaults_KubeSchedulerProfile(prof) - } - - if obj.PercentageOfNodesToScore == nil { - obj.PercentageOfNodesToScore = pointer.Int32(config.DefaultPercentageOfNodesToScore) - } - - if len(obj.LeaderElection.ResourceLock) == 0 { - // Use lease-based leader election to reduce cost. - // We migrated for EndpointsLease lock in 1.17 and starting in 1.20 we - // migrated to Lease lock. - obj.LeaderElection.ResourceLock = "leases" - } - if len(obj.LeaderElection.ResourceNamespace) == 0 { - obj.LeaderElection.ResourceNamespace = v1beta2.SchedulerDefaultLockObjectNamespace - } - if len(obj.LeaderElection.ResourceName) == 0 { - obj.LeaderElection.ResourceName = v1beta2.SchedulerDefaultLockObjectName - } - - if len(obj.ClientConnection.ContentType) == 0 { - obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" - } - // Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings. - if obj.ClientConnection.QPS == 0.0 { - obj.ClientConnection.QPS = 50.0 - } - if obj.ClientConnection.Burst == 0 { - obj.ClientConnection.Burst = 100 - } - - // Use the default LeaderElectionConfiguration options - componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection) - - if obj.PodInitialBackoffSeconds == nil { - obj.PodInitialBackoffSeconds = pointer.Int64(1) - } - - if obj.PodMaxBackoffSeconds == nil { - obj.PodMaxBackoffSeconds = pointer.Int64(10) - } - - // Enable profiling by default in the scheduler - if obj.EnableProfiling == nil { - obj.EnableProfiling = pointer.Bool(true) - } - - // Enable contention profiling by default if profiling is enabled - if *obj.EnableProfiling && obj.EnableContentionProfiling == nil { - obj.EnableContentionProfiling = pointer.Bool(true) - } -} - -func SetDefaults_DefaultPreemptionArgs(obj *v1beta2.DefaultPreemptionArgs) { - if obj.MinCandidateNodesPercentage == nil { - obj.MinCandidateNodesPercentage = pointer.Int32(10) - } - if obj.MinCandidateNodesAbsolute == nil { - obj.MinCandidateNodesAbsolute = pointer.Int32(100) - } -} - -func SetDefaults_InterPodAffinityArgs(obj *v1beta2.InterPodAffinityArgs) { - if obj.HardPodAffinityWeight == nil { - obj.HardPodAffinityWeight = pointer.Int32(1) - } -} - -func SetDefaults_VolumeBindingArgs(obj *v1beta2.VolumeBindingArgs) { - if obj.BindTimeoutSeconds == nil { - obj.BindTimeoutSeconds = pointer.Int64(600) - } - if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { - obj.Shape = []v1beta2.UtilizationShapePoint{ - { - Utilization: 0, - Score: 0, - }, - { - Utilization: 100, - Score: int32(config.MaxCustomPriorityScore), - }, - } - } -} - -func SetDefaults_NodeResourcesBalancedAllocationArgs(obj *v1beta2.NodeResourcesBalancedAllocationArgs) { - if len(obj.Resources) == 0 { - obj.Resources = defaultResourceSpec - return - } - // If the weight is not set or it is explicitly set to 0, then apply the default weight(1) instead. 
- for i := range obj.Resources { - if obj.Resources[i].Weight == 0 { - obj.Resources[i].Weight = 1 - } - } -} - -func SetDefaults_PodTopologySpreadArgs(obj *v1beta2.PodTopologySpreadArgs) { - if obj.DefaultingType == "" { - obj.DefaultingType = v1beta2.SystemDefaulting - } -} - -func SetDefaults_NodeResourcesFitArgs(obj *v1beta2.NodeResourcesFitArgs) { - if obj.ScoringStrategy == nil { - obj.ScoringStrategy = &v1beta2.ScoringStrategy{ - Type: v1beta2.ScoringStrategyType(config.LeastAllocated), - Resources: defaultResourceSpec, - } - } - if len(obj.ScoringStrategy.Resources) == 0 { - // If no resources specified, use the default set. - obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...) - } - for i := range obj.ScoringStrategy.Resources { - if obj.ScoringStrategy.Resources[i].Weight == 0 { - obj.ScoringStrategy.Resources[i].Weight = 1 - } - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/doc.go deleted file mode 100644 index 0c8b9e17e84f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config -// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1beta2 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=k8s.io/kube-scheduler/config/v1beta2 -// +groupName=kubescheduler.config.k8s.io - -package v1beta2 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/register.go deleted file mode 100644 index b8ca76de5f5a..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/register.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
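A pattern worth noting in the deleted `defaults.go` above (and in its surviving `v1` counterpart): optional scalar fields are pointers (`*int32`, `*int64`, `*bool`) precisely so that nil means "unset" and can be defaulted without clobbering an explicit zero. A minimal sketch of the idiom, with a hypothetical `options` type standing in for the real config:

```go
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

// options is a stand-in for the versioned config type; only the shape
// of the defaulting idiom matters here.
type options struct {
	Parallelism *int32 // nil means "user did not set it"
	QPS         float32
}

func setDefaults(o *options) {
	if o.Parallelism == nil {
		o.Parallelism = pointer.Int32(16) // same default the deleted code used
	}
	// Non-pointer fields can only use the zero value as the "unset" signal:
	if o.QPS == 0.0 {
		o.QPS = 50.0
	}
}

func main() {
	o := options{}
	setDefaults(&o)
	fmt.Println(*o.Parallelism, o.QPS) // 16 50
}
```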
-*/ - -package v1beta2 - -import ( - "k8s.io/kube-scheduler/config/v1beta2" -) - -// GroupName is the group name used in this package -const GroupName = v1beta2.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = v1beta2.SchemeGroupVersion - -var ( - // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, - // defaulting and conversion init funcs are registered as well. - localSchemeBuilder = &v1beta2.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go deleted file mode 100644 index e5c9fb73fa65..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go +++ /dev/null @@ -1,945 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta2 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/component-base/config/v1alpha1" - v1beta2 "k8s.io/kube-scheduler/config/v1beta2" - config "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta2.DefaultPreemptionArgs)(nil), (*config.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(a.(*v1beta2.DefaultPreemptionArgs), b.(*config.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.DefaultPreemptionArgs)(nil), (*v1beta2.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(a.(*config.DefaultPreemptionArgs), b.(*v1beta2.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Extender_To_config_Extender(a.(*v1beta2.Extender), b.(*config.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*v1beta2.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Extender_To_v1beta2_Extender(a.(*config.Extender), b.(*v1beta2.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*v1beta2.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*v1beta2.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*v1beta2.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*v1beta2.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*v1beta2.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*v1beta2.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.InterPodAffinityArgs)(nil), (*config.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(a.(*v1beta2.InterPodAffinityArgs), b.(*config.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.InterPodAffinityArgs)(nil), (*v1beta2.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(a.(*config.InterPodAffinityArgs), b.(*v1beta2.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*v1beta2.KubeSchedulerProfile)(nil), (*config.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(a.(*v1beta2.KubeSchedulerProfile), b.(*config.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*v1beta2.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeAffinityArgs)(nil), (*v1beta2.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(a.(*config.NodeAffinityArgs), b.(*v1beta2.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.NodeResourcesBalancedAllocationArgs)(nil), (*config.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(a.(*v1beta2.NodeResourcesBalancedAllocationArgs), b.(*config.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesBalancedAllocationArgs)(nil), (*v1beta2.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(a.(*config.NodeResourcesBalancedAllocationArgs), b.(*v1beta2.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.NodeResourcesFitArgs)(nil), (*config.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(a.(*v1beta2.NodeResourcesFitArgs), b.(*config.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesFitArgs)(nil), (*v1beta2.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(a.(*config.NodeResourcesFitArgs), b.(*v1beta2.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Plugin_To_config_Plugin(a.(*v1beta2.Plugin), b.(*config.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*v1beta2.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugin_To_v1beta2_Plugin(a.(*config.Plugin), b.(*v1beta2.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PluginConfig_To_config_PluginConfig(a.(*v1beta2.PluginConfig), b.(*config.PluginConfig), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*v1beta2.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginConfig_To_v1beta2_PluginConfig(a.(*config.PluginConfig), b.(*v1beta2.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PluginSet_To_config_PluginSet(a.(*v1beta2.PluginSet), b.(*config.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*v1beta2.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginSet_To_v1beta2_PluginSet(a.(*config.PluginSet), b.(*v1beta2.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Plugins_To_config_Plugins(a.(*v1beta2.Plugins), b.(*config.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.PodTopologySpreadArgs)(nil), (*config.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(a.(*v1beta2.PodTopologySpreadArgs), b.(*config.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PodTopologySpreadArgs)(nil), (*v1beta2.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(a.(*config.PodTopologySpreadArgs), b.(*v1beta2.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.RequestedToCapacityRatioParam)(nil), (*config.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(a.(*v1beta2.RequestedToCapacityRatioParam), b.(*config.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioParam)(nil), (*v1beta2.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(a.(*config.RequestedToCapacityRatioParam), b.(*v1beta2.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ResourceSpec_To_config_ResourceSpec(a.(*v1beta2.ResourceSpec), b.(*config.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*v1beta2.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ResourceSpec_To_v1beta2_ResourceSpec(a.(*config.ResourceSpec), b.(*v1beta2.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(a.(*v1beta2.ScoringStrategy), b.(*config.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*v1beta2.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(a.(*config.ScoringStrategy), b.(*v1beta2.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*v1beta2.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*v1beta2.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*v1beta2.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.VolumeBindingArgs)(nil), (*config.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(a.(*v1beta2.VolumeBindingArgs), b.(*config.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.VolumeBindingArgs)(nil), (*v1beta2.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(a.(*config.VolumeBindingArgs), b.(*v1beta2.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*v1beta2.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*v1beta2.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta2.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta2.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.Plugins)(nil), (*v1beta2.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugins_To_v1beta2_Plugins(a.(*config.Plugins), b.(*v1beta2.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1beta2.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1beta2.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - if err := 
v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1beta2.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1beta2.DefaultPreemptionArgs, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1beta2.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_v1beta2_Extender_To_config_Extender(in *v1beta2.Extender, out *config.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_v1beta2_Extender_To_config_Extender is an autogenerated conversion function. -func Convert_v1beta2_Extender_To_config_Extender(in *v1beta2.Extender, out *config.Extender, s conversion.Scope) error { - return autoConvert_v1beta2_Extender_To_config_Extender(in, out, s) -} - -func autoConvert_config_Extender_To_v1beta2_Extender(in *config.Extender, out *v1beta2.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*v1beta2.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]v1beta2.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_config_Extender_To_v1beta2_Extender is an autogenerated conversion function. 
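On the `unsafe.Pointer` casts in the conversion functions above and below: conversion-gen emits a zero-copy reinterpret cast such as `*(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources))` when the source and destination types have identical memory layouts, and falls back to per-element conversion loops otherwise. A minimal standalone sketch of the same trick (`versionedItem` and `internalItem` are hypothetical layout-identical types):

```go
package main

import (
	"fmt"
	"unsafe"
)

type versionedItem struct{ Name string }
type internalItem struct{ Name string } // field-for-field identical layout

func main() {
	in := []versionedItem{{"a"}, {"b"}}
	// Reinterpret the slice header in place; no copy. Valid only because
	// the element types share an identical memory layout.
	out := *(*[]internalItem)(unsafe.Pointer(&in))
	fmt.Println(out) // [{a} {b}]
}
```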
-func Convert_config_Extender_To_v1beta2_Extender(in *config.Extender, out *v1beta2.Extender, s conversion.Scope) error { - return autoConvert_config_Extender_To_v1beta2_Extender(in, out, s) -} - -func autoConvert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1beta2.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { - out.Name = in.Name - out.IgnoredByScheduler = in.IgnoredByScheduler - return nil -} - -// Convert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function. -func Convert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1beta2.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { - return autoConvert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s) -} - -func autoConvert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1beta2.ExtenderManagedResource, s conversion.Scope) error { - out.Name = in.Name - out.IgnoredByScheduler = in.IgnoredByScheduler - return nil -} - -// Convert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource is an autogenerated conversion function. -func Convert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1beta2.ExtenderManagedResource, s conversion.Scope) error { - return autoConvert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(in, out, s) -} - -func autoConvert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1beta2.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { - out.Insecure = in.Insecure - out.ServerName = in.ServerName - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.CAFile = in.CAFile - out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) - out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) - out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) - return nil -} - -// Convert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function. -func Convert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1beta2.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { - return autoConvert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s) -} - -func autoConvert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1beta2.ExtenderTLSConfig, s conversion.Scope) error { - out.Insecure = in.Insecure - out.ServerName = in.ServerName - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.CAFile = in.CAFile - out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) - out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) - out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) - return nil -} - -// Convert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig is an autogenerated conversion function. 
-func Convert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1beta2.ExtenderTLSConfig, s conversion.Scope) error { - return autoConvert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(in, out, s) -} - -func autoConvert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1beta2.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { - return err - } - out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods - return nil -} - -// Convert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs is an autogenerated conversion function. -func Convert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1beta2.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error { - return autoConvert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in, out, s) -} - -func autoConvert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1beta2.InterPodAffinityArgs, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { - return err - } - out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods - return nil -} - -// Convert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs is an autogenerated conversion function. -func Convert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1beta2.InterPodAffinityArgs, s conversion.Scope) error { - return autoConvert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in, out, s) -} - -func autoConvert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1beta2.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Parallelism, &out.Parallelism, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := v1.Convert_Pointer_string_To_string(&in.HealthzBindAddress, &out.HealthzBindAddress, s); err != nil { - return err - } - if err := v1.Convert_Pointer_string_To_string(&in.MetricsBindAddress, &out.MetricsBindAddress, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if err := v1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { - return err - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]config.KubeSchedulerProfile, len(*in)) - for i := range *in { - if err := Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(&(*in)[i], 
&(*out)[i], s); err != nil { - return err - } - } - } else { - out.Profiles = nil - } - out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders)) - return nil -} - -func autoConvert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1beta2.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Parallelism, &out.Parallelism, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := v1.Convert_string_To_Pointer_string(&in.HealthzBindAddress, &out.HealthzBindAddress, s); err != nil { - return err - } - if err := v1.Convert_string_To_Pointer_string(&in.MetricsBindAddress, &out.MetricsBindAddress, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if err := v1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { - return err - } - if err := v1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { - return err - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]v1beta2.KubeSchedulerProfile, len(*in)) - for i := range *in { - if err := Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Profiles = nil - } - out.Extenders = *(*[]v1beta2.Extender)(unsafe.Pointer(&in.Extenders)) - return nil -} - -func autoConvert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1beta2.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error { - if err := v1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { - return err - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(config.Plugins) - if err := Convert_v1beta2_Plugins_To_config_Plugins(*in, *out, s); err != nil { - return err - } - } else { - out.Plugins = nil - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]config.PluginConfig, len(*in)) - for i := range *in { - if err := Convert_v1beta2_PluginConfig_To_config_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PluginConfig = nil - } - return nil -} - -// Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile is an autogenerated conversion function. 
-func Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1beta2.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in, out, s) -} - -func autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta2.KubeSchedulerProfile, s conversion.Scope) error { - if err := v1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { - return err - } - // WARNING: in.PercentageOfNodesToScore requires manual conversion: does not exist in peer-type - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(v1beta2.Plugins) - if err := Convert_config_Plugins_To_v1beta2_Plugins(*in, *out, s); err != nil { - return err - } - } else { - out.Plugins = nil - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]v1beta2.PluginConfig, len(*in)) - for i := range *in { - if err := Convert_config_PluginConfig_To_v1beta2_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PluginConfig = nil - } - return nil -} - -func autoConvert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta2.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { - out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) - return nil -} - -// Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs is an autogenerated conversion function. -func Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta2.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in, out, s) -} - -func autoConvert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1beta2.NodeAffinityArgs, s conversion.Scope) error { - out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) - return nil -} - -// Convert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs is an autogenerated conversion function. -func Convert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1beta2.NodeAffinityArgs, s conversion.Scope) error { - return autoConvert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(in, out, s) -} - -func autoConvert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1beta2.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function. 
-func Convert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1beta2.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1beta2.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - out.Resources = *(*[]v1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function. -func Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1beta2.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(in, out, s) -} - -func autoConvert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1beta2.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error { - out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources)) - out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups)) - out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -// Convert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs is an autogenerated conversion function. -func Convert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1beta2.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1beta2.NodeResourcesFitArgs, s conversion.Scope) error { - out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources)) - out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups)) - out.ScoringStrategy = (*v1beta2.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -// Convert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs is an autogenerated conversion function. -func Convert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1beta2.NodeResourcesFitArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(in, out, s) -} - -func autoConvert_v1beta2_Plugin_To_config_Plugin(in *v1beta2.Plugin, out *config.Plugin, s conversion.Scope) error { - out.Name = in.Name - if err := v1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_Plugin_To_config_Plugin is an autogenerated conversion function. 
-func Convert_v1beta2_Plugin_To_config_Plugin(in *v1beta2.Plugin, out *config.Plugin, s conversion.Scope) error { - return autoConvert_v1beta2_Plugin_To_config_Plugin(in, out, s) -} - -func autoConvert_config_Plugin_To_v1beta2_Plugin(in *config.Plugin, out *v1beta2.Plugin, s conversion.Scope) error { - out.Name = in.Name - if err := v1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil { - return err - } - return nil -} - -// Convert_config_Plugin_To_v1beta2_Plugin is an autogenerated conversion function. -func Convert_config_Plugin_To_v1beta2_Plugin(in *config.Plugin, out *v1beta2.Plugin, s conversion.Scope) error { - return autoConvert_config_Plugin_To_v1beta2_Plugin(in, out, s) -} - -func autoConvert_v1beta2_PluginConfig_To_config_PluginConfig(in *v1beta2.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { - out.Name = in.Name - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_PluginConfig_To_config_PluginConfig is an autogenerated conversion function. -func Convert_v1beta2_PluginConfig_To_config_PluginConfig(in *v1beta2.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { - return autoConvert_v1beta2_PluginConfig_To_config_PluginConfig(in, out, s) -} - -func autoConvert_config_PluginConfig_To_v1beta2_PluginConfig(in *config.PluginConfig, out *v1beta2.PluginConfig, s conversion.Scope) error { - out.Name = in.Name - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil { - return err - } - return nil -} - -// Convert_config_PluginConfig_To_v1beta2_PluginConfig is an autogenerated conversion function. -func Convert_config_PluginConfig_To_v1beta2_PluginConfig(in *config.PluginConfig, out *v1beta2.PluginConfig, s conversion.Scope) error { - return autoConvert_config_PluginConfig_To_v1beta2_PluginConfig(in, out, s) -} - -func autoConvert_v1beta2_PluginSet_To_config_PluginSet(in *v1beta2.PluginSet, out *config.PluginSet, s conversion.Scope) error { - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]config.Plugin, len(*in)) - for i := range *in { - if err := Convert_v1beta2_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enabled = nil - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]config.Plugin, len(*in)) - for i := range *in { - if err := Convert_v1beta2_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Disabled = nil - } - return nil -} - -// Convert_v1beta2_PluginSet_To_config_PluginSet is an autogenerated conversion function. 
-func Convert_v1beta2_PluginSet_To_config_PluginSet(in *v1beta2.PluginSet, out *config.PluginSet, s conversion.Scope) error { - return autoConvert_v1beta2_PluginSet_To_config_PluginSet(in, out, s) -} - -func autoConvert_config_PluginSet_To_v1beta2_PluginSet(in *config.PluginSet, out *v1beta2.PluginSet, s conversion.Scope) error { - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]v1beta2.Plugin, len(*in)) - for i := range *in { - if err := Convert_config_Plugin_To_v1beta2_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enabled = nil - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]v1beta2.Plugin, len(*in)) - for i := range *in { - if err := Convert_config_Plugin_To_v1beta2_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Disabled = nil - } - return nil -} - -// Convert_config_PluginSet_To_v1beta2_PluginSet is an autogenerated conversion function. -func Convert_config_PluginSet_To_v1beta2_PluginSet(in *config.PluginSet, out *v1beta2.PluginSet, s conversion.Scope) error { - return autoConvert_config_PluginSet_To_v1beta2_PluginSet(in, out, s) -} - -func autoConvert_v1beta2_Plugins_To_config_Plugins(in *v1beta2.Plugins, out *config.Plugins, s conversion.Scope) error { - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Filter, &out.Filter, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreScore, &out.PreScore, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Score, &out.Score, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Reserve, &out.Reserve, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Permit, &out.Permit, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreBind, &out.PreBind, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Bind, &out.Bind, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PostBind, &out.PostBind, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_Plugins_To_config_Plugins is an autogenerated conversion function. 
-func Convert_v1beta2_Plugins_To_config_Plugins(in *v1beta2.Plugins, out *config.Plugins, s conversion.Scope) error { - return autoConvert_v1beta2_Plugins_To_config_Plugins(in, out, s) -} - -func autoConvert_config_Plugins_To_v1beta2_Plugins(in *config.Plugins, out *v1beta2.Plugins, s conversion.Scope) error { - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Filter, &out.Filter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreScore, &out.PreScore, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Score, &out.Score, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Reserve, &out.Reserve, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Permit, &out.Permit, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreBind, &out.PreBind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Bind, &out.Bind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PostBind, &out.PostBind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1beta2.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error { - out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints)) - out.DefaultingType = config.PodTopologySpreadConstraintsDefaulting(in.DefaultingType) - return nil -} - -// Convert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs is an autogenerated conversion function. -func Convert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1beta2.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error { - return autoConvert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in, out, s) -} - -func autoConvert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1beta2.PodTopologySpreadArgs, s conversion.Scope) error { - out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints)) - out.DefaultingType = v1beta2.PodTopologySpreadConstraintsDefaulting(in.DefaultingType) - return nil -} - -// Convert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs is an autogenerated conversion function. 
-func Convert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1beta2.PodTopologySpreadArgs, s conversion.Scope) error { - return autoConvert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(in, out, s) -} - -func autoConvert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1beta2.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error { - out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam is an autogenerated conversion function. -func Convert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1beta2.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error { - return autoConvert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in, out, s) -} - -func autoConvert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1beta2.RequestedToCapacityRatioParam, s conversion.Scope) error { - out.Shape = *(*[]v1beta2.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam is an autogenerated conversion function. -func Convert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1beta2.RequestedToCapacityRatioParam, s conversion.Scope) error { - return autoConvert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(in, out, s) -} - -func autoConvert_v1beta2_ResourceSpec_To_config_ResourceSpec(in *v1beta2.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { - out.Name = in.Name - out.Weight = in.Weight - return nil -} - -// Convert_v1beta2_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function. -func Convert_v1beta2_ResourceSpec_To_config_ResourceSpec(in *v1beta2.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { - return autoConvert_v1beta2_ResourceSpec_To_config_ResourceSpec(in, out, s) -} - -func autoConvert_config_ResourceSpec_To_v1beta2_ResourceSpec(in *config.ResourceSpec, out *v1beta2.ResourceSpec, s conversion.Scope) error { - out.Name = in.Name - out.Weight = in.Weight - return nil -} - -// Convert_config_ResourceSpec_To_v1beta2_ResourceSpec is an autogenerated conversion function. -func Convert_config_ResourceSpec_To_v1beta2_ResourceSpec(in *config.ResourceSpec, out *v1beta2.ResourceSpec, s conversion.Scope) error { - return autoConvert_config_ResourceSpec_To_v1beta2_ResourceSpec(in, out, s) -} - -func autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *v1beta2.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - out.Type = config.ScoringStrategyType(in.Type) - out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.RequestedToCapacityRatio = (*config.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio)) - return nil -} - -// Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function. 
-func Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *v1beta2.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - return autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in, out, s) -} - -func autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *v1beta2.ScoringStrategy, s conversion.Scope) error { - out.Type = v1beta2.ScoringStrategyType(in.Type) - out.Resources = *(*[]v1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.RequestedToCapacityRatio = (*v1beta2.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio)) - return nil -} - -// Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy is an autogenerated conversion function. -func Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *v1beta2.ScoringStrategy, s conversion.Scope) error { - return autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in, out, s) -} - -func autoConvert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1beta2.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { - out.Utilization = in.Utilization - out.Score = in.Score - return nil -} - -// Convert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function. -func Convert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1beta2.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { - return autoConvert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s) -} - -func autoConvert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1beta2.UtilizationShapePoint, s conversion.Scope) error { - out.Utilization = in.Utilization - out.Score = in.Score - return nil -} - -// Convert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint is an autogenerated conversion function. -func Convert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1beta2.UtilizationShapePoint, s conversion.Scope) error { - return autoConvert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(in, out, s) -} - -func autoConvert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1beta2.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { - return err - } - out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs is an autogenerated conversion function. -func Convert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1beta2.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(in, out, s) -} - -func autoConvert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1beta2.VolumeBindingArgs, s conversion.Scope) error { - if err := v1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { - return err - } - out.Shape = *(*[]v1beta2.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs is an autogenerated conversion function. 
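A recurring trick in the generated converters being deleted above is the zero-copy slice cast through unsafe.Pointer: when conversion-gen can prove the source and destination types are layout-identical, it reinterprets the slice header in place instead of copying element by element, and it falls back to an explicit per-element loop (as in the PluginSet converters above) when the layouts differ. Below is a minimal, self-contained sketch of that pattern; inSpec and outSpec are hypothetical stand-ins for a layout-identical peer pair such as the ResourceSpec types, not code from this patch.

package main

import (
	"fmt"
	"unsafe"
)

// Two struct types with identical field order and types, hence identical layout.
type inSpec struct {
	Name   string
	Weight int64
}

type outSpec struct {
	Name   string
	Weight int64
}

func main() {
	in := []inSpec{{Name: "cpu", Weight: 1}, {Name: "memory", Weight: 1}}

	// Reinterpret the slice header in place; no per-element copy happens.
	// This is only sound because inSpec and outSpec are layout-identical,
	// which is the invariant the generator checks before emitting this form.
	out := *(*[]outSpec)(unsafe.Pointer(&in))

	fmt.Println(out[0].Name, out[1].Name) // cpu memory
}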
-func Convert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1beta2.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(in, out, s) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index e38909ba4a91..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta2 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go deleted file mode 100644 index 359ee6437660..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1beta2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1beta2 "k8s.io/kube-scheduler/config/v1beta2" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1beta2.DefaultPreemptionArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultPreemptionArgs(obj.(*v1beta2.DefaultPreemptionArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*v1beta2.InterPodAffinityArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.KubeSchedulerConfiguration{}, func(obj interface{}) { - SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1beta2.KubeSchedulerConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&v1beta2.NodeResourcesBalancedAllocationArgs{}, func(obj interface{}) { - SetObjectDefaults_NodeResourcesBalancedAllocationArgs(obj.(*v1beta2.NodeResourcesBalancedAllocationArgs)) - }) - scheme.AddTypeDefaultingFunc(&v1beta2.NodeResourcesFitArgs{}, func(obj interface{}) { SetObjectDefaults_NodeResourcesFitArgs(obj.(*v1beta2.NodeResourcesFitArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.PodTopologySpreadArgs{}, func(obj interface{}) { SetObjectDefaults_PodTopologySpreadArgs(obj.(*v1beta2.PodTopologySpreadArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.VolumeBindingArgs{}, func(obj interface{}) { SetObjectDefaults_VolumeBindingArgs(obj.(*v1beta2.VolumeBindingArgs)) }) - return nil -} - -func SetObjectDefaults_DefaultPreemptionArgs(in *v1beta2.DefaultPreemptionArgs) { - SetDefaults_DefaultPreemptionArgs(in) -} - -func SetObjectDefaults_InterPodAffinityArgs(in *v1beta2.InterPodAffinityArgs) { - SetDefaults_InterPodAffinityArgs(in) -} - -func SetObjectDefaults_KubeSchedulerConfiguration(in *v1beta2.KubeSchedulerConfiguration) { - SetDefaults_KubeSchedulerConfiguration(in) -} - -func SetObjectDefaults_NodeResourcesBalancedAllocationArgs(in *v1beta2.NodeResourcesBalancedAllocationArgs) { - SetDefaults_NodeResourcesBalancedAllocationArgs(in) -} - -func SetObjectDefaults_NodeResourcesFitArgs(in *v1beta2.NodeResourcesFitArgs) { - SetDefaults_NodeResourcesFitArgs(in) -} - -func SetObjectDefaults_PodTopologySpreadArgs(in *v1beta2.PodTopologySpreadArgs) { - SetDefaults_PodTopologySpreadArgs(in) -} - -func SetObjectDefaults_VolumeBindingArgs(in *v1beta2.VolumeBindingArgs) { - SetDefaults_VolumeBindingArgs(in) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go index 13d479fece57..41d1a2aa8e3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go @@ -92,7 +92,7 @@ type pluginIndex struct { } func mergePluginSet(defaultPluginSet, customPluginSet v1beta3.PluginSet) v1beta3.PluginSet { - disabledPlugins := sets.NewString() + disabledPlugins := sets.New[string]() enabledCustomPlugins := make(map[string]pluginIndex) // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. 
replacedPluginIndex := sets.NewInt() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go index b3816d36a005..374bcb5474e2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go @@ -55,13 +55,13 @@ func pluginsNames(p *v1beta3.Plugins) []string { p.Permit, p.QueueSort, } - n := sets.NewString() + n := sets.New[string]() for _, e := range extensions { for _, pg := range e.Enabled { n.Insert(pg.Name) } } - return n.List() + return sets.List(n) } func setDefaults_KubeSchedulerProfile(prof *v1beta3.KubeSchedulerProfile) { @@ -69,7 +69,7 @@ func setDefaults_KubeSchedulerProfile(prof *v1beta3.KubeSchedulerProfile) { prof.Plugins = mergePlugins(getDefaultPlugins(), prof.Plugins) // Set default plugin configs. scheme := GetPluginArgConversionScheme() - existingConfigs := sets.NewString() + existingConfigs := sets.New[string]() for j := range prof.PluginConfig { existingConfigs.Insert(prof.PluginConfig[j].Name) args := prof.PluginConfig[j].Args.Object diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go index 841c537c3f7e..a12860fe77b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go @@ -466,6 +466,7 @@ func autoConvert_config_KubeSchedulerConfiguration_To_v1beta3_KubeSchedulerConfi out.Profiles = nil } out.Extenders = *(*[]v1beta3.Extender)(unsafe.Pointer(&in.Extenders)) + // WARNING: in.DelayCacheUntilActive requires manual conversion: does not exist in peer-type return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go index e284101607f0..d6153ed5d9d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation.go @@ -33,7 +33,6 @@ import ( componentbasevalidation "k8s.io/component-base/config/validation" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2" "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3" ) @@ -142,10 +141,6 @@ type invalidPlugins struct { // Remember to add an entry to that list when creating a new component config // version (even if the list of invalid plugins is empty). 
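A large share of the hunks from here down is a mechanical migration from the string-specialized sets.String/sets.NewString helpers to the generic sets.Set[T] API in k8s.io/apimachinery/pkg/util/sets; sorted extraction correspondingly moves from the .List() method to the package-level sets.List function, which requires an ordered element type. A small standalone sketch of the new API (illustrative, not code from this patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old style was sets.NewString("b", "a"); the new constructor is
	// generic over any comparable element type.
	s := sets.New[string]("b", "a")
	s.Insert("c")

	fmt.Println(s.Has("a")) // true

	// Sorted extraction moved from a method (s.List()) to a package-level
	// function, since sorting needs an ordered element type.
	fmt.Println(sets.List(s)) // [a b c]

	// Set algebra is unchanged; the element type is inferred here.
	t := sets.New("b", "c", "d")
	fmt.Println(sets.List(s.Intersection(t))) // [b c]
}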
var invalidPluginsByVersion = []invalidPlugins{ - { - schemeGroupVersion: v1beta2.SchemeGroupVersion.String(), - plugins: []string{}, - }, { schemeGroupVersion: v1beta3.SchemeGroupVersion.String(), plugins: []string{}, @@ -227,7 +222,7 @@ func validatePluginConfig(path *field.Path, apiVersion string, profile *config.K } } - seenPluginConfig := make(sets.String) + seenPluginConfig := sets.New[string]() for i := range profile.PluginConfig { pluginConfigPath := path.Child("pluginConfig").Index(i) @@ -298,7 +293,7 @@ func validateCommonQueueSort(path *field.Path, profiles []config.KubeSchedulerPr func validateExtenders(fldPath *field.Path, extenders []config.Extender) []error { var errs []error binders := 0 - extenderManagedResources := sets.NewString() + extenderManagedResources := sets.New[string]() for i, extender := range extenders { path := fldPath.Index(i) if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go index 325a6bc6779f..15c9ddfaa31a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/validation/validation_pluginargs.go @@ -31,7 +31,8 @@ import ( "k8s.io/kubernetes/pkg/scheduler/apis/config" ) -var supportedScoringStrategyTypes = sets.NewString( +// supportedScoringStrategyTypes has to be a set of strings for use with field.Unsupported +var supportedScoringStrategyTypes = sets.New( string(config.LeastAllocated), string(config.MostAllocated), string(config.RequestedToCapacityRatio), @@ -148,13 +149,13 @@ func validateTopologyKey(p *field.Path, v string) field.ErrorList { } func validateWhenUnsatisfiable(p *field.Path, v v1.UnsatisfiableConstraintAction) *field.Error { - supportedScheduleActions := sets.NewString(string(v1.DoNotSchedule), string(v1.ScheduleAnyway)) + supportedScheduleActions := sets.New(string(v1.DoNotSchedule), string(v1.ScheduleAnyway)) if len(v) == 0 { return field.Required(p, "can not be empty") } if !supportedScheduleActions.Has(string(v)) { - return field.NotSupported(p, v, supportedScheduleActions.List()) + return field.NotSupported(p, v, sets.List(supportedScheduleActions)) } return nil } @@ -221,7 +222,7 @@ func validateResources(resources []config.ResourceSpec, p *field.Path) field.Err // ValidateNodeResourcesBalancedAllocationArgs validates that NodeResourcesBalancedAllocationArgs are set correctly. func ValidateNodeResourcesBalancedAllocationArgs(path *field.Path, args *config.NodeResourcesBalancedAllocationArgs) error { var allErrs field.ErrorList - seenResources := sets.NewString() + seenResources := sets.New[string]() for i, resource := range args.Resources { if seenResources.Has(resource.Name) { allErrs = append(allErrs, field.Duplicate(path.Child("resources").Index(i).Child("name"), resource.Name)) @@ -270,7 +271,7 @@ func ValidateVolumeBindingArgs(path *field.Path, args *config.VolumeBindingArgs) }) } -// ValidateVolumeBindingArgs validates that VolumeBindingArgs with scheduler features. +// ValidateVolumeBindingArgsWithOptions validates VolumeBindingArgs with the given VolumeBindingArgsValidationOptions.
func ValidateVolumeBindingArgsWithOptions(path *field.Path, args *config.VolumeBindingArgs, opts VolumeBindingArgsValidationOptions) error { var allErrs field.ErrorList @@ -313,7 +314,7 @@ func ValidateNodeResourcesFitArgs(path *field.Path, args *config.NodeResourcesFi strategyPath := path.Child("scoringStrategy") if args.ScoringStrategy != nil { if !supportedScoringStrategyTypes.Has(string(args.ScoringStrategy.Type)) { - allErrs = append(allErrs, field.NotSupported(strategyPath.Child("type"), args.ScoringStrategy.Type, supportedScoringStrategyTypes.List())) + allErrs = append(allErrs, field.NotSupported(strategyPath.Child("type"), args.ScoringStrategy.Type, sets.List(supportedScoringStrategyTypes))) } allErrs = append(allErrs, validateResources(args.ScoringStrategy.Resources, strategyPath.Child("resources"))...) if args.ScoringStrategy.RequestedToCapacityRatio != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go index 95ed57114b56..15944ceb1630 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go @@ -43,9 +43,10 @@ import ( ) func (sched *Scheduler) onStorageClassAdd(obj interface{}) { + logger := sched.logger sc, ok := obj.(*storagev1.StorageClass) if !ok { - klog.ErrorS(nil, "Cannot convert to *storagev1.StorageClass", "obj", obj) + logger.Error(nil, "Cannot convert to *storagev1.StorageClass", "obj", obj) return } @@ -56,42 +57,45 @@ func (sched *Scheduler) onStorageClassAdd(obj interface{}) { // We don't need to invalidate cached results because results will not be // cached for pod that has unbound immediate PVCs. if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.StorageClassAdd, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, queue.StorageClassAdd, nil) } } func (sched *Scheduler) addNodeToCache(obj interface{}) { + logger := sched.logger node, ok := obj.(*v1.Node) if !ok { - klog.ErrorS(nil, "Cannot convert to *v1.Node", "obj", obj) + logger.Error(nil, "Cannot convert to *v1.Node", "obj", obj) return } - nodeInfo := sched.Cache.AddNode(node) - klog.V(3).InfoS("Add event for node", "node", klog.KObj(node)) - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.NodeAdd, preCheckForNode(nodeInfo)) + nodeInfo := sched.Cache.AddNode(logger, node) + logger.V(3).Info("Add event for node", "node", klog.KObj(node)) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, queue.NodeAdd, preCheckForNode(nodeInfo)) } func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) { + logger := sched.logger oldNode, ok := oldObj.(*v1.Node) if !ok { - klog.ErrorS(nil, "Cannot convert oldObj to *v1.Node", "oldObj", oldObj) + logger.Error(nil, "Cannot convert oldObj to *v1.Node", "oldObj", oldObj) return } newNode, ok := newObj.(*v1.Node) if !ok { - klog.ErrorS(nil, "Cannot convert newObj to *v1.Node", "newObj", newObj) + logger.Error(nil, "Cannot convert newObj to *v1.Node", "newObj", newObj) return } - nodeInfo := sched.Cache.UpdateNode(oldNode, newNode) + nodeInfo := sched.Cache.UpdateNode(logger, oldNode, newNode) // Only requeue unschedulable pods if the node became more schedulable. 
if event := nodeSchedulingPropertiesChange(newNode, oldNode); event != nil { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(*event, preCheckForNode(nodeInfo)) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, *event, preCheckForNode(nodeInfo)) } } func (sched *Scheduler) deleteNodeFromCache(obj interface{}) { + logger := sched.logger var node *v1.Node switch t := obj.(type) { case *v1.Node: @@ -100,28 +104,30 @@ func (sched *Scheduler) deleteNodeFromCache(obj interface{}) { var ok bool node, ok = t.Obj.(*v1.Node) if !ok { - klog.ErrorS(nil, "Cannot convert to *v1.Node", "obj", t.Obj) + logger.Error(nil, "Cannot convert to *v1.Node", "obj", t.Obj) return } default: - klog.ErrorS(nil, "Cannot convert to *v1.Node", "obj", t) + logger.Error(nil, "Cannot convert to *v1.Node", "obj", t) return } - klog.V(3).InfoS("Delete event for node", "node", klog.KObj(node)) - if err := sched.Cache.RemoveNode(node); err != nil { - klog.ErrorS(err, "Scheduler cache RemoveNode failed") + logger.V(3).Info("Delete event for node", "node", klog.KObj(node)) + if err := sched.Cache.RemoveNode(logger, node); err != nil { + logger.Error(err, "Scheduler cache RemoveNode failed") } } func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) { + logger := sched.logger pod := obj.(*v1.Pod) - klog.V(3).InfoS("Add event for unscheduled pod", "pod", klog.KObj(pod)) - if err := sched.SchedulingQueue.Add(pod); err != nil { + logger.V(3).Info("Add event for unscheduled pod", "pod", klog.KObj(pod)) + if err := sched.SchedulingQueue.Add(logger, pod); err != nil { utilruntime.HandleError(fmt.Errorf("unable to queue %T: %v", obj, err)) } } func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) { + logger := sched.logger oldPod, newPod := oldObj.(*v1.Pod), newObj.(*v1.Pod) // Bypass update event that carries identical objects; otherwise, a duplicated // Pod may go through scheduling and cause unexpected behavior (see #96071). @@ -137,12 +143,13 @@ func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) { return } - if err := sched.SchedulingQueue.Update(oldPod, newPod); err != nil { + if err := sched.SchedulingQueue.Update(logger, oldPod, newPod); err != nil { utilruntime.HandleError(fmt.Errorf("unable to update %T: %v", newObj, err)) } } func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) { + logger := sched.logger var pod *v1.Pod switch t := obj.(type) { case *v1.Pod: @@ -158,7 +165,7 @@ func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) { utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj)) return } - klog.V(3).InfoS("Delete event for unscheduled pod", "pod", klog.KObj(pod)) + logger.V(3).Info("Delete event for unscheduled pod", "pod", klog.KObj(pod)) if err := sched.SchedulingQueue.Delete(pod); err != nil { utilruntime.HandleError(fmt.Errorf("unable to dequeue %T: %v", obj, err)) } @@ -166,53 +173,56 @@ func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) { if err != nil { // This shouldn't happen, because we only accept for scheduling the pods // which specify a scheduler name that matches one of the profiles. - klog.ErrorS(err, "Unable to get profile", "pod", klog.KObj(pod)) + logger.Error(err, "Unable to get profile", "pod", klog.KObj(pod)) return } // If a waiting pod is rejected, it indicates it's previously assumed and we're // removing it from the scheduler cache. In this case, signal a AssignedPodDelete // event to immediately retry some unscheduled Pods. 
if fwk.RejectWaitingPod(pod.UID) { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.AssignedPodDelete, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, queue.AssignedPodDelete, nil) } } func (sched *Scheduler) addPodToCache(obj interface{}) { + logger := sched.logger pod, ok := obj.(*v1.Pod) if !ok { - klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj) + logger.Error(nil, "Cannot convert to *v1.Pod", "obj", obj) return } - klog.V(3).InfoS("Add event for scheduled pod", "pod", klog.KObj(pod)) + logger.V(3).Info("Add event for scheduled pod", "pod", klog.KObj(pod)) - if err := sched.Cache.AddPod(pod); err != nil { - klog.ErrorS(err, "Scheduler cache AddPod failed", "pod", klog.KObj(pod)) + if err := sched.Cache.AddPod(logger, pod); err != nil { + logger.Error(err, "Scheduler cache AddPod failed", "pod", klog.KObj(pod)) } - sched.SchedulingQueue.AssignedPodAdded(pod) + sched.SchedulingQueue.AssignedPodAdded(logger, pod) } func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) { + logger := sched.logger oldPod, ok := oldObj.(*v1.Pod) if !ok { - klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj) + logger.Error(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj) return } newPod, ok := newObj.(*v1.Pod) if !ok { - klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj) + logger.Error(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj) return } - klog.V(4).InfoS("Update event for scheduled pod", "pod", klog.KObj(oldPod)) + logger.V(4).Info("Update event for scheduled pod", "pod", klog.KObj(oldPod)) - if err := sched.Cache.UpdatePod(oldPod, newPod); err != nil { - klog.ErrorS(err, "Scheduler cache UpdatePod failed", "pod", klog.KObj(oldPod)) + if err := sched.Cache.UpdatePod(logger, oldPod, newPod); err != nil { + logger.Error(err, "Scheduler cache UpdatePod failed", "pod", klog.KObj(oldPod)) } - sched.SchedulingQueue.AssignedPodUpdated(newPod) + sched.SchedulingQueue.AssignedPodUpdated(logger, newPod) } func (sched *Scheduler) deletePodFromCache(obj interface{}) { + logger := sched.logger var pod *v1.Pod switch t := obj.(type) { case *v1.Pod: @@ -221,19 +231,19 @@ func (sched *Scheduler) deletePodFromCache(obj interface{}) { var ok bool pod, ok = t.Obj.(*v1.Pod) if !ok { - klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj) + logger.Error(nil, "Cannot convert to *v1.Pod", "obj", t.Obj) return } default: - klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t) + logger.Error(nil, "Cannot convert to *v1.Pod", "obj", t) return } - klog.V(3).InfoS("Delete event for scheduled pod", "pod", klog.KObj(pod)) - if err := sched.Cache.RemovePod(pod); err != nil { - klog.ErrorS(err, "Scheduler cache RemovePod failed", "pod", klog.KObj(pod)) + logger.V(3).Info("Delete event for scheduled pod", "pod", klog.KObj(pod)) + if err := sched.Cache.RemovePod(logger, pod); err != nil { + logger.Error(err, "Scheduler cache RemovePod failed", "pod", klog.KObj(pod)) } - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.AssignedPodDelete, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, queue.AssignedPodDelete, nil) } // assignedPod selects pods that are assigned (scheduled and running). 
@@ -317,24 +327,25 @@ func addAllEventHandlers( }, ) + logger := sched.logger buildEvtResHandler := func(at framework.ActionType, gvk framework.GVK, shortGVK string) cache.ResourceEventHandlerFuncs { funcs := cache.ResourceEventHandlerFuncs{} if at&framework.Add != 0 { evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Add, Label: fmt.Sprintf("%vAdd", shortGVK)} funcs.AddFunc = func(_ interface{}) { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(evt, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil) } } if at&framework.Update != 0 { evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Update, Label: fmt.Sprintf("%vUpdate", shortGVK)} funcs.UpdateFunc = func(_, _ interface{}) { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(evt, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil) } } if at&framework.Delete != 0 { evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Delete, Label: fmt.Sprintf("%vDelete", shortGVK)} funcs.DeleteFunc = func(_ interface{}) { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(evt, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil) } } return funcs @@ -402,7 +413,7 @@ func addAllEventHandlers( informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, _ interface{}) { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.StorageClassUpdate, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, queue.StorageClassUpdate, nil) }, }, ) @@ -421,7 +432,7 @@ func addAllEventHandlers( // - foos.v1 (2 sections) // - foo.v1.example.com (the first section should be plural) if strings.Count(string(gvk), ".") < 2 { - klog.ErrorS(nil, "incorrect event registration", "gvk", gvk) + logger.Error(nil, "incorrect event registration", "gvk", gvk) continue } // Fall back to try dynamic informers. 
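The eventhandlers.go hunks above are part of the contextual-logging migration: instead of calling the global klog.ErrorS and klog.V(n).InfoS helpers, each handler obtains a klog.Logger (here from sched.logger) and threads it explicitly into the cache and scheduling queue calls, so downstream log lines inherit whatever names and key/value pairs the caller attached. A minimal sketch of the pattern, using a hypothetical toyCache type rather than the scheduler's real Cache interface:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// toyCache stands in for any component that used to call the global klog
// functions and now receives a logger explicitly.
type toyCache struct{}

func (c *toyCache) AddNode(logger klog.Logger, name string) {
	// Was: klog.V(3).InfoS("Add event for node", "node", name)
	logger.V(3).Info("Add event for node", "node", name)
}

func handleAdd(ctx context.Context, cache *toyCache, name string) {
	// FromContext retrieves the logger the caller attached to ctx,
	// including any values and names added upstream.
	logger := klog.FromContext(ctx)
	if name == "" {
		// Was: klog.ErrorS(nil, "Cannot convert to *v1.Node", ...)
		logger.Error(nil, "Cannot convert to *v1.Node")
		return
	}
	cache.AddNode(logger, name)
}

func main() {
	logger := klog.Background().WithValues("component", "scheduler")
	ctx := klog.NewContext(context.Background(), logger)
	handleAdd(ctx, &toyCache{}, "node-1")
}

The payoff is that a line logged deep inside the cache still carries component=scheduler without the cache knowing anything about its caller.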
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go index 8350e04c95af..7ccb9e6ca691 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/extender.go @@ -48,7 +48,7 @@ type HTTPExtender struct { weight int64 client *http.Client nodeCacheCapable bool - managedResources sets.String + managedResources sets.Set[string] ignorable bool } @@ -96,7 +96,7 @@ func NewHTTPExtender(config *schedulerapi.Extender) (framework.Extender, error) Transport: transport, Timeout: config.HTTPTimeout.Duration, } - managedResources := sets.NewString() + managedResources := sets.New[string]() for _, r := range config.ManagedResources { managedResources.Insert(string(r.Name)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go index 374efa16ee81..43ba35674825 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/interface.go @@ -35,6 +35,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/events" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" ) @@ -641,7 +642,7 @@ type Handle interface { type PreFilterResult struct { // The set of nodes that should be considered downstream; if nil then // all nodes are eligible. - NodeNames sets.String + NodeNames sets.Set[string] } func (p *PreFilterResult) AllNodes() bool { @@ -704,11 +705,11 @@ func (ni *NominatingInfo) Mode() NominatingMode { type PodNominator interface { // AddNominatedPod adds the given pod to the nominator or // updates it if it already exists. - AddNominatedPod(pod *PodInfo, nominatingInfo *NominatingInfo) + AddNominatedPod(logger klog.Logger, pod *PodInfo, nominatingInfo *NominatingInfo) // DeleteNominatedPodIfExists deletes nominatedPod from internal cache. It's a no-op if it doesn't exist. DeleteNominatedPodIfExists(pod *v1.Pod) // UpdateNominatedPod updates the <oldPod> with <newPodInfo>. - UpdateNominatedPod(oldPod *v1.Pod, newPodInfo *PodInfo) + UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo *PodInfo) // NominatedPodsForNode returns nominatedPods on the given node.
NominatedPodsForNode(nodeName string) []*PodInfo } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go index b096f93ed147..a2d20968d110 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go @@ -142,6 +142,7 @@ func (pl *DefaultPreemption) SelectVictimsOnNode( pod *v1.Pod, nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status) { + logger := klog.FromContext(ctx) var potentialVictims []*framework.PodInfo removePod := func(rpi *framework.PodInfo) error { if err := nodeInfo.RemovePod(rpi.Pod); err != nil { @@ -207,7 +208,7 @@ func (pl *DefaultPreemption) SelectVictimsOnNode( } rpi := pi.Pod victims = append(victims, rpi) - klog.V(5).InfoS("Pod is a potential preemption victim on node", "pod", klog.KObj(rpi), "node", klog.KObj(nodeInfo.Node())) + logger.V(5).Info("Pod is a potential preemption victim on node", "pod", klog.KObj(rpi), "node", klog.KObj(nodeInfo.Node())) } return fits, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go index 02a7c7926930..b5a83e810489 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" ) @@ -158,10 +157,7 @@ func (pl *InterPodAffinity) getExistingAntiAffinityCounts(ctx context.Context, p processNode := func(i int) { nodeInfo := nodes[i] node := nodeInfo.Node() - if node == nil { - klog.ErrorS(nil, "Node not found") - return - } + topoMap := make(topologyToMatchedTermCount) for _, existingPod := range nodeInfo.PodsWithRequiredAntiAffinity { topoMap.updateWithAntiAffinityTerms(existingPod.RequiredAntiAffinityTerms, pod, nsLabels, node, 1) @@ -197,10 +193,7 @@ func (pl *InterPodAffinity) getIncomingAffinityAntiAffinityCounts(ctx context.Co processNode := func(i int) { nodeInfo := allNodes[i] node := nodeInfo.Node() - if node == nil { - klog.ErrorS(nil, "Node not found") - return - } + affinity := make(topologyToMatchedTermCount) antiAffinity := make(topologyToMatchedTermCount) for _, existingPod := range nodeInfo.Pods { @@ -369,9 +362,6 @@ func satisfyPodAffinity(state *preFilterState, nodeInfo *framework.NodeInfo) boo // Filter invoked at the filter extension point. // It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. 
func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - if nodeInfo.Node() == nil { - return framework.NewStatus(framework.Error, "node not found") - } state, err := getPreFilterState(cycleState) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go index fd4cc24327df..10c6d25f01a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go @@ -131,7 +131,7 @@ func (pl *InterPodAffinity) PreScore( ) *framework.Status { if len(nodes) == 0 { // No nodes to score. - return nil + return framework.NewStatus(framework.Skip) } if pl.sharedLister == nil { @@ -141,21 +141,19 @@ func (pl *InterPodAffinity) PreScore( affinity := pod.Spec.Affinity hasPreferredAffinityConstraints := affinity != nil && affinity.PodAffinity != nil && len(affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0 hasPreferredAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil && len(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0 + hasConstraints := hasPreferredAffinityConstraints || hasPreferredAntiAffinityConstraints // Optionally ignore calculating preferences of existing pods' affinity rules // if the incoming pod has no inter-pod affinities. - if pl.args.IgnorePreferredTermsOfExistingPods && !hasPreferredAffinityConstraints && !hasPreferredAntiAffinityConstraints { - cycleState.Write(preScoreStateKey, &preScoreState{ - topologyScore: make(map[string]map[string]int64), - }) - return nil + if pl.args.IgnorePreferredTermsOfExistingPods && !hasConstraints { + return framework.NewStatus(framework.Skip) } // Unless the pod being scheduled has preferred affinity terms, we only // need to process nodes hosting pods with affinity. var allNodes []*framework.NodeInfo var err error - if hasPreferredAffinityConstraints || hasPreferredAntiAffinityConstraints { + if hasConstraints { allNodes, err = pl.sharedLister.NodeInfos().List() if err != nil { return framework.AsStatus(fmt.Errorf("failed to get all nodes from shared lister: %w", err)) @@ -192,13 +190,11 @@ func (pl *InterPodAffinity) PreScore( index := int32(-1) processNode := func(i int) { nodeInfo := allNodes[i] - if nodeInfo.Node() == nil { - return - } + // Unless the pod being scheduled has preferred affinity terms, we only // need to process pods with affinity in the node. podsToProcess := nodeInfo.PodsWithAffinity - if hasPreferredAffinityConstraints || hasPreferredAntiAffinityConstraints { + if hasConstraints { // We need to process all the pods. 
podsToProcess = nodeInfo.Pods } @@ -213,6 +209,10 @@ func (pl *InterPodAffinity) PreScore( } pl.parallelizer.Until(pCtx, len(allNodes), processNode, pl.Name()) + if index == -1 { + return framework.NewStatus(framework.Skip) + } + for i := 0; i <= int(index); i++ { state.topologyScore.append(topoScores[i]) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go index d966e53d1d17..871fd924ea45 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go @@ -107,14 +107,14 @@ func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState *framework.Cyc // Check if there is affinity to a specific node and return it. terms := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms - var nodeNames sets.String + var nodeNames sets.Set[string] for _, t := range terms { - var termNodeNames sets.String + var termNodeNames sets.Set[string] for _, r := range t.MatchFields { if r.Key == metav1.ObjectNameField && r.Operator == v1.NodeSelectorOpIn { // The requirements represent ANDed constraints, and so we need to // find the intersection of nodes. - s := sets.NewString(r.Values...) + s := sets.New(r.Values...) if termNodeNames == nil { termNodeNames = s } else { @@ -149,9 +149,7 @@ func (pl *NodeAffinity) PreFilterExtensions() framework.PreFilterExtensions { // the plugin's added affinity. func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() - if node == nil { - return framework.NewStatus(framework.Error, "node not found") - } + if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(node) { return framework.NewStatus(framework.UnschedulableAndUnresolvable, errReasonEnforced) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go index e02134f793c4..1f0a0f388df5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename/node_name.go @@ -54,9 +54,7 @@ func (pl *NodeName) Name() string { // Filter invoked at the filter extension point. 
func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - if nodeInfo.Node() == nil { - return framework.NewStatus(framework.Error, "node not found") - } + if !Fits(pod, nodeInfo) { return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go index ef6b8723b652..f375be2d42f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go @@ -105,7 +105,7 @@ func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleS // Detail: score = (1 - std) * MaxNodeScore, where std is calculated by the root square of Σ((fraction(i)-mean)^2)/len(resources) // The algorithm is partly inspired by: // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" - return ba.score(pod, nodeInfo, s.podRequests) + return ba.score(ctx, pod, nodeInfo, s.podRequests) } // ScoreExtensions of the Score plugin. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go index 81edad6e2777..3f4a352183d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -24,7 +24,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api/v1/resource" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/scheduler/apis/config" @@ -82,8 +81,8 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{ // Fit is a plugin that checks if a node has sufficient resources. 
type Fit struct { - ignoredResources sets.String - ignoredResourceGroups sets.String + ignoredResources sets.Set[string] + ignoredResourceGroups sets.Set[string] enableInPlacePodVerticalScaling bool handle framework.Handle resourceAllocationScorer @@ -165,8 +164,8 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr } return &Fit{ - ignoredResources: sets.NewString(args.IgnoredResources...), - ignoredResourceGroups: sets.NewString(args.IgnoredResourceGroups...), + ignoredResources: sets.New(args.IgnoredResources...), + ignoredResourceGroups: sets.New(args.IgnoredResourceGroups...), enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling, handle: h, resourceAllocationScorer: *scorePlugin(args), @@ -286,7 +285,7 @@ func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) []InsufficientResource { return fitsRequest(computePodResourceRequest(pod), nodeInfo, nil, nil) } -func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.String) []InsufficientResource { +func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.Set[string]) []InsufficientResource { insufficientResources := make([]InsufficientResource, 0, 4) allowedPodNumber := nodeInfo.Allocatable.AllowedPodNumber @@ -307,7 +306,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor return insufficientResources } - if podRequest.MilliCPU > (nodeInfo.Allocatable.MilliCPU - nodeInfo.Requested.MilliCPU) { + if podRequest.MilliCPU > 0 && podRequest.MilliCPU > (nodeInfo.Allocatable.MilliCPU-nodeInfo.Requested.MilliCPU) { insufficientResources = append(insufficientResources, InsufficientResource{ ResourceName: v1.ResourceCPU, Reason: "Insufficient cpu", @@ -316,7 +315,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor Capacity: nodeInfo.Allocatable.MilliCPU, }) } - if podRequest.Memory > (nodeInfo.Allocatable.Memory - nodeInfo.Requested.Memory) { + if podRequest.Memory > 0 && podRequest.Memory > (nodeInfo.Allocatable.Memory-nodeInfo.Requested.Memory) { insufficientResources = append(insufficientResources, InsufficientResource{ ResourceName: v1.ResourceMemory, Reason: "Insufficient memory", @@ -325,7 +324,8 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor Capacity: nodeInfo.Allocatable.Memory, }) } - if podRequest.EphemeralStorage > (nodeInfo.Allocatable.EphemeralStorage - nodeInfo.Requested.EphemeralStorage) { + if podRequest.EphemeralStorage > 0 && + podRequest.EphemeralStorage > (nodeInfo.Allocatable.EphemeralStorage-nodeInfo.Requested.EphemeralStorage) { insufficientResources = append(insufficientResources, InsufficientResource{ ResourceName: v1.ResourceEphemeralStorage, Reason: "Insufficient ephemeral-storage", @@ -381,5 +381,5 @@ func (f *Fit) Score(ctx context.Context, state *framework.CycleState, pod *v1.Po } } - return f.score(pod, nodeInfo, s.podRequests) + return f.score(ctx, pod, nodeInfo, s.podRequests) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go index 68e4433f9185..863647bd0af6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go @@ -17,6 +17,8 @@ limitations under the License. package noderesources import ( + "context" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -44,13 +46,13 @@ type resourceAllocationScorer struct { // score will use `scorer` function to calculate the score. func (r *resourceAllocationScorer) score( + ctx context.Context, pod *v1.Pod, nodeInfo *framework.NodeInfo, podRequests []int64) (int64, *framework.Status) { + logger := klog.FromContext(ctx) node := nodeInfo.Node() - if node == nil { - return 0, framework.NewStatus(framework.Error, "node not found") - } + // resources not set, nothing scheduled, if len(r.resources) == 0 { return 0, framework.NewStatus(framework.Error, "resources not found") @@ -59,7 +61,7 @@ func (r *resourceAllocationScorer) score( requested := make([]int64, len(r.resources)) allocatable := make([]int64, len(r.resources)) for i := range r.resources { - alloc, req := r.calculateResourceAllocatableRequest(nodeInfo, v1.ResourceName(r.resources[i].Name), podRequests[i]) + alloc, req := r.calculateResourceAllocatableRequest(logger, nodeInfo, v1.ResourceName(r.resources[i].Name), podRequests[i]) // Only fill the extended resource entry when it's non-zero. if alloc == 0 { continue @@ -70,8 +72,8 @@ func (r *resourceAllocationScorer) score( score := r.scorer(requested, allocatable) - if klogV := klog.V(10); klogV.Enabled() { // Serializing these maps is costly. - klogV.InfoS("Listing internal info for allocatable resources, requested resources and score", "pod", + if loggerV := logger.V(10); loggerV.Enabled() { // Serializing these maps is costly. + loggerV.Info("Listed internal info for allocatable resources, requested resources and score", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name, "allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score, ) @@ -84,7 +86,7 @@ func (r *resourceAllocationScorer) score( // - 1st param: quantity of allocatable resource on the node. // - 2nd param: aggregated quantity of requested resource on the node. // Note: if it's an extended resource, and the pod doesn't request it, (0, 0) is returned. 
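The loggerV := logger.V(10); loggerV.Enabled() form introduced in resource_allocation.go above exists because building the log arguments (serializing the requested and allocatable maps) is itself costly, so the work should happen only when verbosity 10 is actually active. A standalone sketch of the guard, with a hypothetical expensiveDump standing in for the costly serialization:

package main

import "k8s.io/klog/v2"

// expensiveDump stands in for work (e.g. serializing large maps) that we
// only want to pay for when the log line will actually be emitted.
func expensiveDump() string {
	return "big serialized state"
}

func main() {
	logger := klog.Background()

	// Naive form: expensiveDump() runs even when v=10 is disabled, because
	// arguments are evaluated before the call.
	// logger.V(10).Info("state", "dump", expensiveDump())

	// Guarded form, as in the patch: evaluate the arguments only when the
	// verbosity threshold is enabled.
	if loggerV := logger.V(10); loggerV.Enabled() {
		loggerV.Info("state", "dump", expensiveDump())
	}
}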
-func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, resource v1.ResourceName, podRequest int64) (int64, int64) { +func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger klog.Logger, nodeInfo *framework.NodeInfo, resource v1.ResourceName, podRequest int64) (int64, int64) { requested := nodeInfo.NonZeroRequested if r.useRequested { requested = nodeInfo.Requested @@ -107,7 +109,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo return nodeInfo.Allocatable.ScalarResources[resource], (nodeInfo.Requested.ScalarResources[resource] + podRequest) } } - klog.V(10).InfoS("Requested resource is omitted for node score calculation", "resourceName", resource) + logger.V(10).Info("Requested resource is omitted for node score calculation", "resourceName", resource) return 0, 0 } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go index 47231837e5cb..47844847ff82 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go @@ -60,9 +60,7 @@ func (pl *NodeUnschedulable) Name() string { // Filter invoked at the filter extension point. func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() - if node == nil { - return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonUnknownCondition) - } + // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`. podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{ Key: v1.TaintNodeUnschedulable, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go index ab2d335f6afc..8d345d602eef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go @@ -60,6 +60,7 @@ type CSILimits struct { translator InTreeToCSITranslator } +var _ framework.PreFilterPlugin = &CSILimits{} var _ framework.FilterPlugin = &CSILimits{} var _ framework.EnqueueExtensions = &CSILimits{} @@ -80,6 +81,26 @@ func (pl *CSILimits) EventsToRegister() []framework.ClusterEvent { } } +// PreFilter invoked at the prefilter extension point +// +// If the pod doesn't have any of those types of volumes, we'll skip the Filter phase +func (pl *CSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { + volumes := pod.Spec.Volumes + for i := range volumes { + vol := &volumes[i] + if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(vol) { + return nil, nil + } + } + + return nil, framework.NewStatus(framework.Skip) +} + +// PreFilterExtensions returns prefilter extensions, pod add and remove.
+func (pl *CSILimits) PreFilterExtensions() framework.PreFilterExtensions { + return nil +} + // Filter invoked at the filter extension point. func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { // If the new pod doesn't have any volume attached to it, the predicate will always be true @@ -88,9 +109,6 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v } node := nodeInfo.Node() - if node == nil { - return framework.NewStatus(framework.Error, "node not found") - } // If CSINode doesn't exist, the predicate may read the limits from Node object csiNode, err := pl.csiNodeLister.Get(node.Name) @@ -172,7 +190,7 @@ func (pl *CSILimits) filterAttachableVolumes( // - If the volume is migratable and CSI migration is enabled, need to count it // as well. // - If the volume is not migratable, it will be count in non_csi filter. - if err := pl.checkAttachableInlineVolume(vol, csiNode, pod, result); err != nil { + if err := pl.checkAttachableInlineVolume(&vol, csiNode, pod, result); err != nil { return err } @@ -220,15 +238,15 @@ func (pl *CSILimits) filterAttachableVolumes( // checkAttachableInlineVolume takes an inline volume and add to the result map if the // volume is migratable and CSI migration for this plugin has been enabled. -func (pl *CSILimits) checkAttachableInlineVolume(vol v1.Volume, csiNode *storagev1.CSINode, +func (pl *CSILimits) checkAttachableInlineVolume(vol *v1.Volume, csiNode *storagev1.CSINode, pod *v1.Pod, result map[string]string) error { - if !pl.translator.IsInlineMigratable(&vol) { + if !pl.translator.IsInlineMigratable(vol) { return nil } // Check if the intree provisioner CSI migration has been enabled. - inTreeProvisionerName, err := pl.translator.GetInTreePluginNameFromSpec(nil, &vol) + inTreeProvisionerName, err := pl.translator.GetInTreePluginNameFromSpec(nil, vol) if err != nil { - return fmt.Errorf("looking up provisioner name for volume %v: %w", vol, err) + return fmt.Errorf("looking up provisioner name for volume %s: %w", vol.Name, err) } if !isCSIMigrationOn(csiNode, inTreeProvisionerName) { csiNodeName := "" @@ -240,9 +258,9 @@ func (pl *CSILimits) checkAttachableInlineVolume(vol v1.Volume, csiNode *storage return nil } // Do translation for the in-tree volume. 
- translatedPV, err := pl.translator.TranslateInTreeInlineVolumeToCSI(&vol, pod.Namespace) + translatedPV, err := pl.translator.TranslateInTreeInlineVolumeToCSI(vol, pod.Namespace) if err != nil || translatedPV == nil { - return fmt.Errorf("converting volume(%v) from inline to csi: %w", vol, err) + return fmt.Errorf("converting volume(%s) from inline to csi: %w", vol.Name, err) } driverName, err := pl.translator.GetCSINameFromInTreeName(inTreeProvisionerName) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go index 4a2535e5f26b..03a012eb0f30 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go @@ -119,6 +119,7 @@ type nonCSILimits struct { randomVolumeIDPrefix string } +var _ framework.PreFilterPlugin = &nonCSILimits{} var _ framework.FilterPlugin = &nonCSILimits{} var _ framework.EnqueueExtensions = &nonCSILimits{} @@ -208,6 +209,27 @@ func (pl *nonCSILimits) EventsToRegister() []framework.ClusterEvent { } } +// PreFilter invoked at the prefilter extension point +// +// If the pod doesn't have any of those volume types, we'll skip the Filter phase +func (pl *nonCSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { + volumes := pod.Spec.Volumes + for i := range volumes { + vol := &volumes[i] + _, ok := pl.filter.FilterVolume(vol) + if ok || vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil { + return nil, nil + } + } + + return nil, framework.NewStatus(framework.Skip) +} + +// PreFilterExtensions returns prefilter extensions, pod add and remove. +func (pl *nonCSILimits) PreFilterExtensions() framework.PreFilterExtensions { + return nil +} + // Filter invoked at the filter extension point. func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { // If a pod doesn't have any volume attached to it, the predicate will always be true.
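// The checkAttachableInlineVolume signature change above (v1.Volume to
// *v1.Volume), shown in isolation: iterating by index and passing &volumes[i]
// avoids copying the sizeable Volume struct on every call. Helper names here
// are illustrative only.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func visitVolumes(volumes []v1.Volume, visit func(*v1.Volume) error) error {
	for i := range volumes {
		if err := visit(&volumes[i]); err != nil { // no per-iteration copy
			return err
		}
	}
	return nil
}

func main() {
	vols := []v1.Volume{{Name: "data"}, {Name: "logs"}}
	_ = visitVolumes(vols, func(v *v1.Volume) error {
		fmt.Println(v.Name)
		return nil
	})
}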
@@ -216,7 +238,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod return nil } - newVolumes := make(sets.String) + newVolumes := sets.New[string]() if err := pl.filterVolumes(pod, true /* new pod */, newVolumes); err != nil { return framework.AsStatus(err) } @@ -227,9 +249,6 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod } node := nodeInfo.Node() - if node == nil { - return framework.NewStatus(framework.Error, "node not found") - } var csiNode *storage.CSINode var err error @@ -248,7 +267,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod } // count unique volumes - existingVolumes := make(sets.String) + existingVolumes := sets.New[string]() for _, existingPod := range nodeInfo.Pods { if err := pl.filterVolumes(existingPod.Pod, false /* existing pod */, existingVolumes); err != nil { return framework.AsStatus(err) @@ -274,7 +293,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod return nil } -func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.String) error { +func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.Set[string]) error { volumes := pod.Spec.Volumes for i := range volumes { vol := &volumes[i] diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go index 07f388cf94d7..8a884bb2cff6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go @@ -68,13 +68,13 @@ func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool { return false } - var mpaSet sets.String + var mpaSet sets.Set[string] mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey] if len(mpa) == 0 { - mpaSet = sets.NewString() + mpaSet = sets.New[string]() } else { tok := strings.Split(mpa, ",") - mpaSet = sets.NewString(tok...) + mpaSet = sets.New(tok...) } return mpaSet.Has(pluginName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index bd0d5a92f30f..68cdc2199fb3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -152,7 +152,10 @@ func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState *framewor s, err := pl.calPreFilterState(ctx, pod) if err != nil { return nil, framework.AsStatus(err) + } else if s != nil && len(s.Constraints) == 0 { + return nil, framework.NewStatus(framework.Skip) } + cycleState.Write(preFilterStateKey, s) return nil, nil } @@ -236,11 +239,8 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error // calPreFilterState computes preFilterState describing how pods are spread on topologies. 
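// The sets.String to sets.Set[string] migration running through these files,
// reduced to its essentials: the constructors change, the operations do not.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	s := sets.New("a", "b") // sets.Set[string], element type inferred
	e := sets.New[string]() // explicit type parameter when starting empty
	e.Insert("c")
	fmt.Println(s.Has("a"), e.Has("c"), s.Union(e).Len()) // true true 3
}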
func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) (*preFilterState, error) { - allNodes, err := pl.sharedLister.NodeInfos().List() - if err != nil { - return nil, fmt.Errorf("listing NodeInfos: %w", err) - } var constraints []topologySpreadConstraint + var err error if len(pod.Spec.TopologySpreadConstraints) > 0 { // We have feature gating in APIServer to strip the spec // so don't need to re-check feature gate, just check length of Constraints. @@ -262,6 +262,11 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) return &preFilterState{}, nil } + allNodes, err := pl.sharedLister.NodeInfos().List() + if err != nil { + return nil, fmt.Errorf("listing NodeInfos: %w", err) + } + s := preFilterState{ Constraints: constraints, TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)), @@ -273,10 +278,6 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) processNode := func(i int) { nodeInfo := allNodes[i] node := nodeInfo.Node() - if node == nil { - klog.ErrorS(nil, "Node not found") - return - } if !pl.enableNodeInclusionPolicyInPodTopologySpread { // spreading is applied to nodes that pass those filters. @@ -333,9 +334,6 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) // Filter invoked at the filter extension point. func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() - if node == nil { - return framework.AsStatus(fmt.Errorf("node not found")) - } s, err := getPreFilterState(cycleState) if err != nil { @@ -347,12 +345,13 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C return nil } + logger := klog.FromContext(ctx) podLabelSet := labels.Set(pod.Labels) for _, c := range s.Constraints { tpKey := c.TopologyKey tpVal, ok := node.Labels[c.TopologyKey] if !ok { - klog.V(5).InfoS("Node doesn't have required label", "node", klog.KObj(node), "label", tpKey) + logger.V(5).Info("Node doesn't have required label", "node", klog.KObj(node), "label", tpKey) return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNodeLabelNotMatch) } @@ -360,7 +359,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C // 'existing matching num' + 'if self-match (1 or 0)' - 'global minimum' <= 'maxSkew' minMatchNum, err := s.minMatchNum(tpKey, c.MinDomains, pl.enableMinDomainsInPodTopologySpread) if err != nil { - klog.ErrorS(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.TpKeyToCriticalPaths) + logger.Error(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.TpKeyToCriticalPaths) continue } @@ -376,7 +375,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C } skew := matchNum + selfMatchNum - minMatchNum if skew > int(c.MaxSkew) { - klog.V(5).InfoS("Node failed spreadConstraint: matchNum + selfMatchNum - minMatchNum > maxSkew", "node", klog.KObj(node), "topologyKey", tpKey, "matchNum", matchNum, "selfMatchNum", selfMatchNum, "minMatchNum", minMatchNum, "maxSkew", c.MaxSkew) + logger.V(5).Info("Node failed spreadConstraint: matchNum + selfMatchNum - minMatchNum > maxSkew", "node", klog.KObj(node), "topologyKey", tpKey, "matchNum", matchNum, "selfMatchNum", selfMatchNum, "minMatchNum", minMatchNum, "maxSkew", 
c.MaxSkew) return framework.NewStatus(framework.Unschedulable, ErrReasonConstraintsNotMatch) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go index 7a3c0589b1b9..bf5e8fe7d054 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go @@ -36,7 +36,7 @@ const invalidScore = -1 type preScoreState struct { Constraints []topologySpreadConstraint // IgnoredNodes is a set of node names which miss some Constraints[*].topologyKey. - IgnoredNodes sets.String + IgnoredNodes sets.Set[string] // TopologyPairToPodCounts is keyed with topologyPair, and valued with the number of matching pods. TopologyPairToPodCounts map[topologyPair]*int64 // TopologyNormalizingWeight is the weight we give to the counts per topology. @@ -126,7 +126,7 @@ func (pl *PodTopologySpread) PreScore( } state := &preScoreState{ - IgnoredNodes: sets.NewString(), + IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: make(map[topologyPair]*int64), } // Only require that nodes have all the topology labels if using @@ -149,9 +149,6 @@ func (pl *PodTopologySpread) PreScore( processAllNode := func(i int) { nodeInfo := allNodes[i] node := nodeInfo.Node() - if node == nil { - return - } if !pl.enableNodeInclusionPolicyInPodTopologySpread { // `node` should satisfy incoming pod's NodeSelector/NodeAffinity diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go index 4611a98158c0..a990754e7412 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -63,9 +63,6 @@ func (pl *TaintToleration) EventsToRegister() []framework.ClusterEvent { // Filter invoked at the filter extension point. 
func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() - if node == nil { - return framework.AsStatus(fmt.Errorf("invalid nodeInfo")) - } taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc()) if !isUntolerated { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go index 283b4083e698..5b5127762218 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go @@ -43,7 +43,7 @@ type AssumeCache interface { // Get the object by name Get(objName string) (interface{}, error) - // Get the API object by name + // GetAPIObj gets the API object by name GetAPIObj(objName string) (interface{}, error) // List all the objects in the cache diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go index b9ac4cb04273..c326f2e605af 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/binder.go @@ -125,7 +125,7 @@ type InTreeToCSITranslator interface { // // This integrates into the existing scheduler workflow as follows: // 1. The scheduler takes a Pod off the scheduler queue and processes it serially: -// a. Invokes all pre-filter plugins for the pod. GetPodVolumes() is invoked +// a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked // here, pod volume information will be saved in current scheduling cycle state for later use. // If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce // down the list of eligible nodes based on the bound PV's NodeAffinity (if any). @@ -157,7 +157,7 @@ type SchedulerVolumeBinder interface { // // If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made // and all nodes should be considered. - GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) + GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the // node and returns pod's volumes information. @@ -386,7 +386,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeCla // // Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes // should be considered. -func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) { +func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { if len(boundClaims) == 0 { return } @@ -407,13 +407,13 @@ func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) // on the first found list of eligible nodes for the local PersistentVolume, // insert to the eligible node set. 
if eligibleNodes == nil { - eligibleNodes = sets.NewString(nodeNames...) + eligibleNodes = sets.New(nodeNames...) } else { // for subsequent finding of eligible nodes for the local PersistentVolume, // take the intersection of the nodes with the existing eligible nodes // for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2, // then the eligible node list should be empty. - eligibleNodes = eligibleNodes.Intersection(sets.NewString(nodeNames...)) + eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...)) } } } @@ -1112,13 +1112,13 @@ func isPluginMigratedToCSIOnNode(pluginName string, csiNode *storagev1.CSINode) return false } - var mpaSet sets.String + var mpaSet sets.Set[string] mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey] if len(mpa) == 0 { - mpaSet = sets.NewString() + mpaSet = sets.New[string]() } else { tok := strings.Split(mpa, ",") - mpaSet = sets.NewString(tok...) + mpaSet = sets.New(tok...) } return mpaSet.Has(pluginName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go index c77c9733789a..b8e78b8bea12 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go @@ -55,7 +55,7 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *Pod } // GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes. -func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) { +func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go index 3d251ba4a55d..2209e9309bb0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -233,9 +233,6 @@ func getStateData(cs *framework.CycleState) (*stateData, error) { // PVCs can be matched with an available and node-compatible PV. func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() - if node == nil { - return framework.NewStatus(framework.Error, "node not found") - } state, err := getStateData(cs) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go index b73752051ef3..3681da4f7ab5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go @@ -154,10 +154,26 @@ func haveOverlap(a1, a2 []string) bool { return false } +// return true if there are conflict checking targets. 
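// GetEligibleNodes above (now returning sets.Set[string]) intersects per-PV
// node lists, so PVs pinned to disjoint nodes yield an empty set. A sketch of
// just that accumulation, with hypothetical input data:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func eligibleNodes(perPVNodes [][]string) sets.Set[string] {
	var eligible sets.Set[string]
	for _, nodes := range perPVNodes {
		if eligible == nil {
			eligible = sets.New(nodes...) // first PV seeds the set
		} else {
			eligible = eligible.Intersection(sets.New(nodes...))
		}
	}
	return eligible
}

func main() {
	fmt.Println(eligibleNodes([][]string{{"n1", "n2"}, {"n2", "n3"}}).UnsortedList()) // [n2]
}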
+func needsRestrictionsCheck(v v1.Volume) bool { + return v.GCEPersistentDisk != nil || v.AWSElasticBlockStore != nil || v.RBD != nil || v.ISCSI != nil +} + // PreFilter computes and stores cycleState containing details for enforcing ReadWriteOncePod. func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { + needsCheck := false + for i := range pod.Spec.Volumes { + if needsRestrictionsCheck(pod.Spec.Volumes[i]) { + needsCheck = true + break + } + } + if !pl.enableReadWriteOncePod { - return nil, nil + if needsCheck { + return nil, nil + } + return nil, framework.NewStatus(framework.Skip) } pvcs, err := pl.readWriteOncePodPVCsForPod(ctx, pod) @@ -172,6 +188,10 @@ func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framewo if err != nil { return nil, framework.AsStatus(err) } + + if !needsCheck && s.conflictingPVCRefCount == 0 { + return nil, framework.NewStatus(framework.Skip) + } cycleState.Write(preFilterStateKey, s) return nil, nil } @@ -257,14 +277,12 @@ func (pl *VolumeRestrictions) readWriteOncePodPVCsForPod(ctx context.Context, po // existing volumes. func satisfyVolumeConflicts(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { for i := range pod.Spec.Volumes { - v := &pod.Spec.Volumes[i] - // fast path if there is no conflict checking targets. - if v.GCEPersistentDisk == nil && v.AWSElasticBlockStore == nil && v.RBD == nil && v.ISCSI == nil { + v := pod.Spec.Volumes[i] + if !needsRestrictionsCheck(v) { continue } - for _, ev := range nodeInfo.Pods { - if isVolumeConflict(v, ev.Pod) { + if isVolumeConflict(&v, ev.Pod) { return false } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go index f3f0f5dfe348..e1b09f6f2a85 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone/volume_zone.go @@ -60,7 +60,7 @@ const ( type pvTopology struct { pvName string key string - values sets.String + values sets.Set[string] } // the state is initialized in PreFilter phase. because we save the pointer in @@ -107,6 +107,7 @@ func (pl *VolumeZone) PreFilter(ctx context.Context, cs *framework.CycleState, p } func (pl *VolumeZone) getPVbyPod(ctx context.Context, pod *v1.Pod) ([]pvTopology, *framework.Status) { + logger := klog.FromContext(ctx) podPVTopologies := make([]pvTopology, 0) for i := range pod.Spec.Volumes { @@ -154,13 +155,13 @@ func (pl *VolumeZone) getPVbyPod(ctx context.Context, pod *v1.Pod) ([]pvTopology if value, ok := pv.ObjectMeta.Labels[key]; ok { volumeVSet, err := volumehelpers.LabelZonesToSet(value) if err != nil { - klog.InfoS("Failed to parse label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err) + logger.Info("Failed to parse label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err) continue } podPVTopologies = append(podPVTopologies, pvTopology{ pvName: pv.Name, key: key, - values: volumeVSet, + values: sets.Set[string](volumeVSet), }) } } @@ -190,6 +191,7 @@ func (pl *VolumeZone) PreFilterExtensions() framework.PreFilterExtensions { // require calling out to the cloud provider. It seems that we are moving away // from inline volume declarations anyway. 
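// The values: sets.Set[string](volumeVSet) cast in getPVbyPod works because
// the legacy sets.String and the generic sets.Set[string] share the same
// underlying map type, so helpers that still return sets.String (such as
// volumehelpers.LabelZonesToSet) can be bridged with a plain type conversion:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	legacy := sets.NewString("us-east-1a", "us-east-1b") // old string-only API
	generic := sets.Set[string](legacy)                  // zero-copy conversion
	fmt.Println(generic.Has("us-east-1a"))               // true
}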
func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { + logger := klog.FromContext(ctx) // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { @@ -226,7 +228,7 @@ func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod for _, pvTopology := range podPVTopologies { v, ok := node.Labels[pvTopology.key] if !ok || !pvTopology.values.Has(v) { - klog.V(10).InfoS("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key) + logger.V(10).Info("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key) return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonConflict) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go index 68e215dd2a5c..15d8a34f39b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go @@ -150,6 +150,8 @@ type Evaluator struct { // - . It's the regular happy path // and the non-empty nominatedNodeName will be applied to the preemptor pod. func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) { + logger := klog.FromContext(ctx) + // 0) Fetch the latest version of . // It's safe to directly fetch pod here. Because the informer cache has already been // initialized when creating the Scheduler obj. @@ -157,13 +159,13 @@ func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeT podNamespace, podName := pod.Namespace, pod.Name pod, err := ev.PodLister.Pods(pod.Namespace).Get(pod.Name) if err != nil { - klog.ErrorS(err, "Getting the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName)) + logger.Error(err, "Could not get the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName)) return nil, framework.AsStatus(err) } // 1) Ensure the preemptor is eligible to preempt other pods. if ok, msg := ev.PodEligibleToPreemptOthers(pod, m[pod.Status.NominatedNodeName]); !ok { - klog.V(5).InfoS("Pod is not eligible for preemption", "pod", klog.KObj(pod), "reason", msg) + logger.V(5).Info("Pod is not eligible for preemption", "pod", klog.KObj(pod), "reason", msg) return nil, framework.NewStatus(framework.Unschedulable, msg) } @@ -188,13 +190,13 @@ func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeT } // 3) Interact with registered Extenders to filter out some candidates if needed. - candidates, status := ev.callExtenders(pod, candidates) + candidates, status := ev.callExtenders(logger, pod, candidates) if !status.IsSuccess() { return nil, status } // 4) Find the best candidate. 
- bestCandidate := ev.SelectCandidate(candidates) + bestCandidate := ev.SelectCandidate(logger, candidates) if bestCandidate == nil || len(bestCandidate.Name()) == 0 { return nil, framework.NewStatus(framework.Unschedulable, "no candidate node for preemption") } @@ -217,12 +219,13 @@ func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framewor if len(allNodes) == 0 { return nil, nil, errors.New("no nodes available") } + logger := klog.FromContext(ctx) potentialNodes, unschedulableNodeStatus := nodesWherePreemptionMightHelp(allNodes, m) if len(potentialNodes) == 0 { - klog.V(3).InfoS("Preemption will not help schedule pod on any node", "pod", klog.KObj(pod)) + logger.V(3).Info("Preemption will not help schedule pod on any node", "pod", klog.KObj(pod)) // In this case, we should clean-up any existing nominated node name of the pod. if err := util.ClearNominatedNodeName(ctx, ev.Handler.ClientSet(), pod); err != nil { - klog.ErrorS(err, "Cannot clear 'NominatedNodeName' field of pod", "pod", klog.KObj(pod)) + logger.Error(err, "Could not clear the nominatedNodeName field of pod", "pod", klog.KObj(pod)) // We do not return as this error is not critical. } return nil, unschedulableNodeStatus, nil @@ -234,12 +237,12 @@ func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framewor } offset, numCandidates := ev.GetOffsetAndNumCandidates(int32(len(potentialNodes))) - if klogV := klog.V(5); klogV.Enabled() { + if loggerV := logger.V(5); logger.Enabled() { var sample []string for i := offset; i < offset+10 && i < int32(len(potentialNodes)); i++ { sample = append(sample, potentialNodes[i].Node().Name) } - klogV.InfoS("Selecting candidates from a pool of nodes", "potentialNodesCount", len(potentialNodes), "offset", offset, "sampleLength", len(sample), "sample", sample, "candidates", numCandidates) + loggerV.Info("Selected candidates from a pool of nodes", "potentialNodesCount", len(potentialNodes), "offset", offset, "sampleLength", len(sample), "sample", sample, "candidates", numCandidates) } candidates, nodeStatuses, err := ev.DryRunPreemption(ctx, pod, potentialNodes, pdbs, offset, numCandidates) for node, nodeStatus := range unschedulableNodeStatus { @@ -252,7 +255,7 @@ func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framewor // We will only check with extenders that support preemption. // Extenders which do not support preemption may later prevent preemptor from being scheduled on the nominated // node. In that case, scheduler will find a different host for the preemptor in subsequent scheduling cycles. 
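// Below the ctx boundary, helpers such as SelectCandidate and callExtenders
// now take klog.Logger explicitly instead of reading the klog global; the
// caller extracts it from ctx once. A reduced sketch with made-up candidates:
package main

import (
	"context"

	"k8s.io/klog/v2"
)

func preempt(ctx context.Context, candidates []string) string {
	logger := klog.FromContext(ctx)
	return selectCandidate(logger, candidates)
}

func selectCandidate(logger klog.Logger, candidates []string) string {
	if len(candidates) == 0 {
		logger.V(5).Info("No candidate node for preemption")
		return ""
	}
	return candidates[0]
}

func main() {
	klog.InitFlags(nil)
	preempt(context.Background(), []string{"node-a", "node-b"})
}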
-func (ev *Evaluator) callExtenders(pod *v1.Pod, candidates []Candidate) ([]Candidate, *framework.Status) { +func (ev *Evaluator) callExtenders(logger klog.Logger, pod *v1.Pod, candidates []Candidate) ([]Candidate, *framework.Status) { extenders := ev.Handler.Extenders() nodeLister := ev.Handler.SnapshotSharedLister().NodeInfos() if len(extenders) == 0 { @@ -272,8 +275,8 @@ func (ev *Evaluator) callExtenders(pod *v1.Pod, candidates []Candidate) ([]Candi nodeNameToVictims, err := extender.ProcessPreemption(pod, victimsMap, nodeLister) if err != nil { if extender.IsIgnorable() { - klog.InfoS("Skipping extender as it returned error and has ignorable flag set", - "extender", extender, "err", err) + logger.Info("Skipped extender as it returned error and has ignorable flag set", + "extender", extender.Name(), "err", err) continue } return nil, framework.AsStatus(err) @@ -283,7 +286,7 @@ func (ev *Evaluator) callExtenders(pod *v1.Pod, candidates []Candidate) ([]Candi if victims == nil || len(victims.Pods) == 0 { if extender.IsIgnorable() { delete(nodeNameToVictims, nodeName) - klog.InfoS("Ignoring node without victims", "node", klog.KRef("", nodeName)) + logger.Info("Ignored node for which the extender didn't report victims", "node", klog.KRef("", nodeName), "extender", extender.Name()) continue } return nil, framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeName)) @@ -312,7 +315,7 @@ func (ev *Evaluator) callExtenders(pod *v1.Pod, candidates []Candidate) ([]Candi // SelectCandidate chooses the best-fit candidate from given and return it. // NOTE: This method is exported for easier testing in default preemption. -func (ev *Evaluator) SelectCandidate(candidates []Candidate) Candidate { +func (ev *Evaluator) SelectCandidate(logger klog.Logger, candidates []Candidate) Candidate { if len(candidates) == 0 { return nil } @@ -321,7 +324,7 @@ func (ev *Evaluator) SelectCandidate(candidates []Candidate) Candidate { } victimsMap := ev.CandidatesToVictimsMap(candidates) - candidateNode := pickOneNodeForPreemption(victimsMap) + candidateNode := pickOneNodeForPreemption(logger, victimsMap) // Same as candidatesToVictimsMap, this logic is not applicable for out-of-tree // preemption plugins that exercise different candidates on the same nominated node. @@ -333,7 +336,7 @@ func (ev *Evaluator) SelectCandidate(candidates []Candidate) Candidate { } // We shouldn't reach here. - klog.ErrorS(errors.New("no candidate selected"), "Should not reach here", "candidates", candidates) + logger.Error(errors.New("no candidate selected"), "Should not reach here", "candidates", candidates) // To not break the whole flow, return the first candidate. return candidates[0] } @@ -348,6 +351,7 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. ctx, cancel := context.WithCancel(ctx) defer cancel() + logger := klog.FromContext(ctx) errCh := parallelize.NewErrorChannel() preemptPod := func(index int) { victim := c.Victims().Pods[index] @@ -355,6 +359,7 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. // Otherwise we should delete the victim. 
if waitingPod := fh.GetWaitingPod(victim.UID); waitingPod != nil { waitingPod.Reject(pluginName, "preempted") + klog.V(2).InfoS("Preemptor pod rejected a waiting pod", "preemptor", klog.KObj(pod), "waitingPod", klog.KObj(victim), "node", c.Name()) } else { if feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { victimPodApply := corev1apply.Pod(victim.Name, victim.Namespace).WithStatus(corev1apply.PodStatus()) @@ -367,17 +372,19 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. ) if _, err := cs.CoreV1().Pods(victim.Namespace).ApplyStatus(ctx, victimPodApply, metav1.ApplyOptions{FieldManager: fieldManager, Force: true}); err != nil { - klog.ErrorS(err, "Preparing pod preemption", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod)) + logger.Error(err, "Could not add DisruptionTarget condition due to preemption", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod)) errCh.SendErrorWithCancel(err, cancel) return } } if err := util.DeletePod(ctx, cs, victim); err != nil { - klog.ErrorS(err, "Preempting pod", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod)) + logger.Error(err, "Preempted pod", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod)) errCh.SendErrorWithCancel(err, cancel) return } + klog.V(2).InfoS("Preemptor Pod preempted victim Pod", "preemptor", klog.KObj(pod), "victim", klog.KObj(victim), "node", c.Name()) } + fh.EventRecorder().Eventf(victim, pod, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by a pod on node %v", c.Name()) } @@ -392,9 +399,9 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. // this node. So, we should remove their nomination. Removing their // nomination updates these pods and moves them to the active queue. It // lets scheduler find another place for them. - nominatedPods := getLowerPriorityNominatedPods(fh, pod, c.Name()) + nominatedPods := getLowerPriorityNominatedPods(logger, fh, pod, c.Name()) if err := util.ClearNominatedNodeName(ctx, cs, nominatedPods...); err != nil { - klog.ErrorS(err, "Cannot clear 'NominatedNodeName' field") + logger.Error(err, "Cannot clear 'NominatedNodeName' field") // We do not return as this error is not critical. } @@ -437,7 +444,7 @@ func getPodDisruptionBudgets(pdbLister policylisters.PodDisruptionBudgetLister) // 6. If there are still ties, the first such node is picked (sort of randomly). // The 'minNodes1' and 'minNodes2' are being reused here to save the memory // allocation and garbage collection time. -func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) string { +func pickOneNodeForPreemption(logger klog.Logger, nodesToVictims map[string]*extenderv1.Victims) string { if len(nodesToVictims) == 0 { return "" } @@ -477,7 +484,7 @@ func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) str // Get earliest start time of all pods on the current node. earliestStartTimeOnNode := util.GetEarliestPodStartTime(nodesToVictims[node]) if earliestStartTimeOnNode == nil { - klog.ErrorS(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", node) + logger.Error(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", node) return int64(math.MinInt64) } // The bigger the earliestStartTimeOnNode, the higher the score. @@ -530,7 +537,7 @@ func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) str // manipulation of NodeInfo and PreFilter state per nominated pod. 
It may not be // worth the complexity, especially because we generally expect to have a very // small number of nominated pods per node. -func getLowerPriorityNominatedPods(pn framework.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod { +func getLowerPriorityNominatedPods(logger klog.Logger, pn framework.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod { podInfos := pn.NominatedPodsForNode(nodeName) if len(podInfos) == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go index d8684f5ae01c..780e0a7923e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go @@ -77,6 +77,7 @@ type frameworkImpl struct { kubeConfig *restclient.Config eventRecorder events.EventRecorder informerFactory informers.SharedInformerFactory + logger klog.Logger metricsRecorder *metrics.MetricAsyncRecorder profileName string @@ -132,8 +133,9 @@ type frameworkOptions struct { podNominator framework.PodNominator extenders []framework.Extender captureProfile CaptureProfile - clusterEventMap map[framework.ClusterEvent]sets.String + clusterEventMap map[framework.ClusterEvent]sets.Set[string] parallelizer parallelize.Parallelizer + logger *klog.Logger } // Option for the frameworkImpl. @@ -142,7 +144,7 @@ type Option func(*frameworkOptions) // WithComponentConfigVersion sets the component config version to the // KubeSchedulerConfiguration version used. The string should be the full // scheme group/version of the external type we converted from (for example -// "kubescheduler.config.k8s.io/v1beta2") +// "kubescheduler.config.k8s.io/v1") func WithComponentConfigVersion(componentConfigVersion string) Option { return func(o *frameworkOptions) { o.componentConfigVersion = componentConfigVersion @@ -216,7 +218,7 @@ func WithCaptureProfile(c CaptureProfile) Option { } // WithClusterEventMap sets clusterEventMap for the scheduling frameworkImpl. -func WithClusterEventMap(m map[framework.ClusterEvent]sets.String) Option { +func WithClusterEventMap(m map[framework.ClusterEvent]sets.Set[string]) Option { return func(o *frameworkOptions) { o.clusterEventMap = m } @@ -229,11 +231,18 @@ func WithMetricsRecorder(r *metrics.MetricAsyncRecorder) Option { } } +// WithLogger overrides the default logger from k8s.io/klog. +func WithLogger(logger klog.Logger) Option { + return func(o *frameworkOptions) { + o.logger = &logger + } +} + // defaultFrameworkOptions are applied when no option corresponding to those fields exist. func defaultFrameworkOptions(stopCh <-chan struct{}) frameworkOptions { return frameworkOptions{ metricsRecorder: metrics.NewMetricsAsyncRecorder(1000, time.Second, stopCh), - clusterEventMap: make(map[framework.ClusterEvent]sets.String), + clusterEventMap: make(map[framework.ClusterEvent]sets.Set[string]), parallelizer: parallelize.NewParallelizer(parallelize.DefaultParallelism), } } @@ -241,12 +250,17 @@ func defaultFrameworkOptions(stopCh <-chan struct{}) frameworkOptions { var _ framework.Framework = &frameworkImpl{} // NewFramework initializes plugins given the configuration and the registry. 
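// The WithLogger option introduced above is the standard functional-options
// pattern with a *klog.Logger field, so "unset" (nil) stays distinguishable
// from an explicitly supplied logger. A self-contained sketch; newThing is a
// placeholder for NewFramework:
package main

import (
	"context"

	"k8s.io/klog/v2"
)

type options struct{ logger *klog.Logger }

type Option func(*options)

func WithLogger(logger klog.Logger) Option {
	return func(o *options) { o.logger = &logger }
}

func newThing(ctx context.Context, opts ...Option) klog.Logger {
	var o options
	for _, opt := range opts {
		opt(&o)
	}
	logger := klog.FromContext(ctx) // default comes from the context
	if o.logger != nil {
		logger = *o.logger // explicit option overrides it
	}
	return logger
}

func main() {
	klog.InitFlags(nil)
	newThing(context.Background(), WithLogger(klog.Background())).Info("constructed")
}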
-func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-chan struct{}, opts ...Option) (framework.Framework, error) { - options := defaultFrameworkOptions(stopCh) +func NewFramework(ctx context.Context, r Registry, profile *config.KubeSchedulerProfile, opts ...Option) (framework.Framework, error) { + options := defaultFrameworkOptions(ctx.Done()) for _, opt := range opts { opt(&options) } + logger := klog.FromContext(ctx) + if options.logger != nil { + logger = *options.logger + } + f := &frameworkImpl{ registry: r, snapshotSharedLister: options.snapshotSharedLister, @@ -260,6 +274,7 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha extenders: options.extenders, PodNominator: options.podNominator, parallelizer: options.parallelizer, + logger: logger, } if profile == nil { @@ -311,7 +326,7 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha pluginsMap[name] = p // Update ClusterEventMap in place. - fillEventToPluginMap(p, options.clusterEventMap) + fillEventToPluginMap(logger, p, options.clusterEventMap) } // initialize plugins per individual extension points @@ -323,7 +338,7 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha // initialize multiPoint plugins to their expanded extension points if len(profile.Plugins.MultiPoint.Enabled) > 0 { - if err := f.expandMultiPointPlugins(profile, pluginsMap); err != nil { + if err := f.expandMultiPointPlugins(logger, profile, pluginsMap); err != nil { return nil, err } } @@ -358,6 +373,20 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha options.captureProfile(outputProfile) } + // Cache metric streams for prefilter and filter plugins. + for i, pl := range f.preFilterPlugins { + f.preFilterPlugins[i] = &instrumentedPreFilterPlugin{ + PreFilterPlugin: f.preFilterPlugins[i], + metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreFilter, f.profileName), + } + } + for i, pl := range f.filterPlugins { + f.filterPlugins[i] = &instrumentedFilterPlugin{ + FilterPlugin: f.filterPlugins[i], + metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Filter, f.profileName), + } + } + return f, nil } @@ -429,7 +458,7 @@ func (os *orderedSet) delete(s string) { } } -func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerProfile, pluginsMap map[string]framework.Plugin) error { +func (f *frameworkImpl) expandMultiPointPlugins(logger klog.Logger, profile *config.KubeSchedulerProfile, pluginsMap map[string]framework.Plugin) error { // initialize MultiPoint plugins for _, e := range f.getExtensionPoints(profile.Plugins) { plugins := reflect.ValueOf(e.slicePtr).Elem() @@ -441,12 +470,12 @@ func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerPro enabledSet.insert(plugin.Name) } - disabledSet := sets.NewString() + disabledSet := sets.New[string]() for _, disabledPlugin := range e.plugins.Disabled { disabledSet.Insert(disabledPlugin.Name) } if disabledSet.Has("*") { - klog.V(4).InfoS("all plugins disabled for extension point, skipping MultiPoint expansion", "extension", pluginType) + logger.V(4).Info("Skipped MultiPoint expansion because all plugins are disabled for extension point", "extension", pluginType) continue } @@ -467,7 +496,7 @@ func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerPro // a plugin that's enabled via MultiPoint can still be disabled for specific extension points if 
disabledSet.Has(ep.Name) { - klog.V(4).InfoS("plugin disabled for extension point", "plugin", ep.Name, "extension", pluginType) + logger.V(4).Info("Skipped disabled plugin for extension point", "plugin", ep.Name, "extension", pluginType) continue } @@ -477,7 +506,7 @@ func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerPro // This maintains expected behavior for overriding default plugins (see https://github.com/kubernetes/kubernetes/pull/99582) if enabledSet.has(ep.Name) { overridePlugins.insert(ep.Name) - klog.InfoS("MultiPoint plugin is explicitly re-configured; overriding", "plugin", ep.Name) + logger.Info("MultiPoint plugin is explicitly re-configured; overriding", "plugin", ep.Name) continue } @@ -516,7 +545,7 @@ func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerPro return nil } -func fillEventToPluginMap(p framework.Plugin, eventToPlugins map[framework.ClusterEvent]sets.String) { +func fillEventToPluginMap(logger klog.Logger, p framework.Plugin, eventToPlugins map[framework.ClusterEvent]sets.Set[string]) { ext, ok := p.(framework.EnqueueExtensions) if !ok { // If interface EnqueueExtensions is not implemented, register the default events @@ -530,17 +559,17 @@ func fillEventToPluginMap(p framework.Plugin, eventToPlugins map[framework.Clust // We treat it as: the plugin is not interested in any event, and hence pod failed by that plugin // cannot be moved by any regular cluster event. if len(events) == 0 { - klog.InfoS("Plugin's EventsToRegister() returned nil", "plugin", p.Name()) + logger.Info("Plugin's EventsToRegister() returned nil", "plugin", p.Name()) return } // The most common case: a plugin implements EnqueueExtensions and returns non-nil result. registerClusterEvents(p.Name(), eventToPlugins, events) } -func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEvent]sets.String, evts []framework.ClusterEvent) { +func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEvent]sets.Set[string], evts []framework.ClusterEvent) { for _, evt := range evts { if eventToPlugins[evt] == nil { - eventToPlugins[evt] = sets.NewString(name) + eventToPlugins[evt] = sets.New(name) } else { eventToPlugins[evt].Insert(name) } @@ -550,7 +579,7 @@ func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEven func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, pluginsMap map[string]framework.Plugin) error { plugins := reflect.ValueOf(pluginList).Elem() pluginType := plugins.Type().Elem() - set := sets.NewString() + set := sets.New[string]() for _, ep := range pluginSet.Enabled { pg, ok := pluginsMap[ep.Name] if !ok { @@ -573,7 +602,7 @@ func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, plugin return nil } -// EnqueuePlugins returns the registered enqueue plugins. +// PreEnqueuePlugins returns the registered preEnqueue plugins. 
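// What the Run*Plugins bodies below now do with loggers, distilled: name the
// logger per extension point, attach shared key/values once, then derive a
// per-plugin child logger and put it back into the ctx handed to the plugin.
// The pod value is an assumption of this sketch.
package main

import (
	"context"

	"k8s.io/klog/v2"
)

func runExtensionPoint(ctx context.Context, point string, plugins []string) {
	logger := klog.LoggerWithName(klog.FromContext(ctx), point)
	logger = klog.LoggerWithValues(logger, "pod", "default/nginx")
	for _, name := range plugins {
		pluginLogger := klog.LoggerWithName(logger, name)
		pluginCtx := klog.NewContext(ctx, pluginLogger)
		_ = pluginCtx // the real framework passes this ctx to the plugin
		pluginLogger.V(4).Info("running plugin")
	}
}

func main() {
	klog.InitFlags(nil)
	runExtensionPoint(context.Background(), "PreFilter", []string{"NodeResourcesFit"})
}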
func (f *frameworkImpl) PreEnqueuePlugins() []framework.PreEnqueuePlugin { return f.preEnqueuePlugins } @@ -608,13 +637,20 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framewor var result *framework.PreFilterResult var pluginsWithNodes []string skipPlugins := sets.New[string]() + logger := klog.FromContext(ctx) + logger = klog.LoggerWithName(logger, "PreFilter") + // TODO(knelasevero): Remove duplicated keys from log entry calls + // When contextualized logging hits GA + // https://github.com/kubernetes/kubernetes/issues/111672 + logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod)) for _, pl := range f.preFilterPlugins { + logger := klog.LoggerWithName(logger, pl.Name()) + ctx := klog.NewContext(ctx, logger) r, s := f.runPreFilterPlugin(ctx, pl, state, pod) if s.IsSkip() { skipPlugins.Insert(pl.Name()) continue } - metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreFilter, f.profileName).Inc() if !s.IsSuccess() { s.SetFailedPlugin(pl.Name()) if s.IsUnschedulable() { @@ -658,14 +694,22 @@ func (f *frameworkImpl) RunPreFilterExtensionAddPod( podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo, ) (status *framework.Status) { + logger := klog.FromContext(ctx) + logger = klog.LoggerWithName(logger, "PreFilterExtension") + // TODO(knelasevero): Remove duplicated keys from log entry calls + // When contextualized logging hits GA + // https://github.com/kubernetes/kubernetes/issues/111672 + logger = klog.LoggerWithValues(logger, "pod", klog.KObj(podToSchedule), "node", klog.KObj(nodeInfo.Node()), "operation", "addPod") for _, pl := range f.preFilterPlugins { if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) { continue } + logger := klog.LoggerWithName(logger, pl.Name()) + ctx := klog.NewContext(ctx, logger) status = f.runPreFilterExtensionAddPod(ctx, pl, state, podToSchedule, podInfoToAdd, nodeInfo) if !status.IsSuccess() { err := status.AsError() - klog.ErrorS(err, "Failed running AddPod on PreFilter plugin", "plugin", pl.Name(), "pod", klog.KObj(podToSchedule)) + logger.Error(err, "Plugin failed", "pod", klog.KObj(podToSchedule), "node", klog.KObj(nodeInfo.Node()), "operation", "addPod", "plugin", pl.Name()) return framework.AsStatus(fmt.Errorf("running AddPod on PreFilter plugin %q: %w", pl.Name(), err)) } } @@ -693,14 +737,22 @@ func (f *frameworkImpl) RunPreFilterExtensionRemovePod( podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo, ) (status *framework.Status) { + logger := klog.FromContext(ctx) + logger = klog.LoggerWithName(logger, "PreFilterExtension") + // TODO(knelasevero): Remove duplicated keys from log entry calls + // When contextualized logging hits GA + // https://github.com/kubernetes/kubernetes/issues/111672 + logger = klog.LoggerWithValues(logger, klog.KObj(podToSchedule), "node", klog.KObj(nodeInfo.Node())) for _, pl := range f.preFilterPlugins { if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) { continue } + logger := klog.LoggerWithName(logger, pl.Name()) + ctx := klog.NewContext(ctx, logger) status = f.runPreFilterExtensionRemovePod(ctx, pl, state, podToSchedule, podInfoToRemove, nodeInfo) if !status.IsSuccess() { err := status.AsError() - klog.ErrorS(err, "Failed running RemovePod on PreFilter plugin", "plugin", pl.Name(), "pod", klog.KObj(podToSchedule)) + logger.Error(err, "Plugin failed", "node", klog.KObj(nodeInfo.Node()), "operation", "removePod", "plugin", pl.Name(), "pod", klog.KObj(podToSchedule)) return 
framework.AsStatus(fmt.Errorf("running RemovePod on PreFilter plugin %q: %w", pl.Name(), err)) } } @@ -728,11 +780,19 @@ func (f *frameworkImpl) RunFilterPlugins( pod *v1.Pod, nodeInfo *framework.NodeInfo, ) *framework.Status { + logger := klog.FromContext(ctx) + logger = klog.LoggerWithName(logger, "Filter") + // TODO(knelasevero): Remove duplicated keys from log entry calls + // When contextualized logging hits GA + // https://github.com/kubernetes/kubernetes/issues/111672 + logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.KObj(nodeInfo.Node())) + for _, pl := range f.filterPlugins { + logger := klog.LoggerWithName(logger, pl.Name()) + ctx := klog.NewContext(ctx, logger) if state.SkipFilterPlugins.Has(pl.Name()) { continue } - metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Filter, f.profileName).Inc() if status := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo); !status.IsSuccess() { if !status.IsUnschedulable() { // Filter plugins are not supposed to return any status other than @@ -765,11 +825,20 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framewo metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime)) }() + logger := klog.FromContext(ctx) + logger = klog.LoggerWithName(logger, "PostFilter") + // TODO(knelasevero): Remove duplicated keys from log entry calls + // When contextualized logging hits GA + // https://github.com/kubernetes/kubernetes/issues/111672 + logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod)) + // `result` records the last meaningful(non-noop) PostFilterResult. var result *framework.PostFilterResult var reasons []string var failedPlugin string for _, pl := range f.postFilterPlugins { + logger := klog.LoggerWithName(logger, pl.Name()) + ctx := klog.NewContext(ctx, logger) r, s := f.runPostFilterPlugin(ctx, pl, state, pod, filteredNodeStatusMap) if s.IsSuccess() { return r, s @@ -835,6 +904,9 @@ func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, s // the nominated pods are treated as not running. We can't just assume the // nominated pods are running because they are not running right now and in fact, // they may end up getting scheduled to a different node. + logger := klog.FromContext(ctx) + logger = klog.LoggerWithName(logger, "FilterWithNominatedPods") + ctx = klog.NewContext(ctx, logger) for i := 0; i < 2; i++ { stateToUse := state nodeInfoToUse := info @@ -861,7 +933,7 @@ func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, s // to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState, // 3) augmented nodeInfo. func addNominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, state *framework.CycleState, nodeInfo *framework.NodeInfo) (bool, *framework.CycleState, *framework.NodeInfo, error) { - if fh == nil || nodeInfo.Node() == nil { + if fh == nil { // This may happen only in tests. 
 	return false, state, nodeInfo, nil
 }
 
@@ -900,7 +972,15 @@ func (f *frameworkImpl) RunPreScorePlugins(
 		metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreScore, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
 	}()
 	skipPlugins := sets.New[string]()
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "PreScore")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod))
 	for _, pl := range f.preScorePlugins {
+		logger := klog.LoggerWithName(logger, pl.Name())
+		ctx := klog.NewContext(ctx, logger)
 		status = f.runPreScorePlugin(ctx, pl, state, pod, nodes)
 		if status.IsSkip() {
 			skipPlugins.Insert(pl.Name())
@@ -949,10 +1029,19 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
 	errCh := parallelize.NewErrorChannel()
 
 	if len(plugins) > 0 {
+		logger := klog.FromContext(ctx)
+		logger = klog.LoggerWithName(logger, "Score")
+		// TODO(knelasevero): Remove duplicated keys from log entry calls
+		// When contextualized logging hits GA
+		// https://github.com/kubernetes/kubernetes/issues/111672
+		logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod))
 		// Run Score method for each node in parallel.
 		f.Parallelizer().Until(ctx, len(nodes), func(index int) {
 			nodeName := nodes[index].Name
+			logger := klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
 			for _, pl := range plugins {
+				logger := klog.LoggerWithName(logger, pl.Name())
+				ctx := klog.NewContext(ctx, logger)
 				s, status := f.runScorePlugin(ctx, pl, state, pod, nodeName)
 				if !status.IsSuccess() {
 					err := fmt.Errorf("plugin %q failed with: %w", pl.Name(), status.AsError())
@@ -1050,16 +1139,24 @@ func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state *framework.
 	defer func() {
 		metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreBind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
 	}()
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "PreBind")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
 	for _, pl := range f.preBindPlugins {
+		logger := klog.LoggerWithName(logger, pl.Name())
+		ctx := klog.NewContext(ctx, logger)
 		status = f.runPreBindPlugin(ctx, pl, state, pod, nodeName)
 		if !status.IsSuccess() {
 			if status.IsUnschedulable() {
-				klog.V(4).InfoS("Pod rejected by PreBind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message())
+				logger.V(4).Info("Pod rejected by PreBind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message())
 				status.SetFailedPlugin(pl.Name())
 				return status
 			}
 			err := status.AsError()
-			klog.ErrorS(err, "Failed running PreBind plugin", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
+			logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
 			return framework.AsStatus(fmt.Errorf("running PreBind plugin %q: %w", pl.Name(), err))
 		}
 	}
@@ -1085,19 +1182,27 @@ func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state *framework.Cyc
 	if len(f.bindPlugins) == 0 {
 		return framework.NewStatus(framework.Skip, "")
 	}
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "Bind")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
 	for _, pl := range f.bindPlugins {
+		logger := klog.LoggerWithName(logger, pl.Name())
+		ctx := klog.NewContext(ctx, logger)
 		status = f.runBindPlugin(ctx, pl, state, pod, nodeName)
 		if status.IsSkip() {
 			continue
 		}
 		if !status.IsSuccess() {
 			if status.IsUnschedulable() {
-				klog.V(4).InfoS("Pod rejected by Bind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message())
+				logger.V(4).Info("Pod rejected by Bind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message())
 				status.SetFailedPlugin(pl.Name())
 				return status
 			}
 			err := status.AsError()
-			klog.ErrorS(err, "Failed running Bind plugin", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
+			logger.Error(err, "Plugin Failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
 			return framework.AsStatus(fmt.Errorf("running Bind plugin %q: %w", pl.Name(), err))
 		}
 		return status
@@ -1121,7 +1226,15 @@ func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state *framework
 	defer func() {
 		metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostBind, framework.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
 	}()
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "PostBind")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
 	for _, pl := range f.postBindPlugins {
+		logger := klog.LoggerWithName(logger, pl.Name())
+		ctx := klog.NewContext(ctx, logger)
 		f.runPostBindPlugin(ctx, pl, state, pod, nodeName)
 	}
 }
@@ -1146,11 +1259,19 @@ func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state *fra
 	defer func() {
 		metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Reserve, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
 	}()
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "Reserve")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
 	for _, pl := range f.reservePlugins {
+		logger := klog.LoggerWithName(logger, pl.Name())
+		ctx := klog.NewContext(ctx, logger)
 		status = f.runReservePluginReserve(ctx, pl, state, pod, nodeName)
 		if !status.IsSuccess() {
 			err := status.AsError()
-			klog.ErrorS(err, "Failed running Reserve plugin", "plugin", pl.Name(), "pod", klog.KObj(pod))
+			logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod))
 			return framework.AsStatus(fmt.Errorf("running Reserve plugin %q: %w", pl.Name(), err))
 		}
 	}
@@ -1176,7 +1297,15 @@ func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state *f
 	}()
 	// Execute the Unreserve operation of each reserve plugin in the
 	// *reverse* order in which the Reserve operation was executed.
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "Unreserve")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
 	for i := len(f.reservePlugins) - 1; i >= 0; i-- {
+		logger := klog.LoggerWithName(logger, f.reservePlugins[i].Name())
+		ctx := klog.NewContext(ctx, logger)
 		f.runReservePluginUnreserve(ctx, f.reservePlugins[i], state, pod, nodeName)
 	}
 }
@@ -1204,11 +1333,19 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.C
 	}()
 	pluginsWaitTime := make(map[string]time.Duration)
 	statusCode := framework.Success
+	logger := klog.FromContext(ctx)
+	logger = klog.LoggerWithName(logger, "Permit")
+	// TODO(knelasevero): Remove duplicated keys from log entry calls
+	// When contextualized logging hits GA
+	// https://github.com/kubernetes/kubernetes/issues/111672
+	logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
 	for _, pl := range f.permitPlugins {
+		logger := klog.LoggerWithName(logger, pl.Name())
+		ctx := klog.NewContext(ctx, logger)
 		status, timeout := f.runPermitPlugin(ctx, pl, state, pod, nodeName)
 		if !status.IsSuccess() {
 			if status.IsUnschedulable() {
-				klog.V(4).InfoS("Pod rejected by permit plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message())
+				logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message())
 				status.SetFailedPlugin(pl.Name())
 				return status
 			}
@@ -1221,7 +1358,7 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.C
 				statusCode = framework.Wait
 			} else {
 				err := status.AsError()
-				klog.ErrorS(err, "Failed running Permit plugin", "plugin", pl.Name(), "pod", klog.KObj(pod))
+				logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod))
 				return framework.AsStatus(fmt.Errorf("running Permit plugin %q: %w", pl.Name(), err)).WithFailedPlugin(pl.Name())
 			}
 		}
@@ -1230,7 +1367,7 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.C
 		waitingPod := newWaitingPod(pod, pluginsWaitTime)
 		f.waitingPods.add(waitingPod)
 		msg := fmt.Sprintf("one or more plugins asked to wait and no plugin rejected pod %q", pod.Name)
-		klog.V(4).InfoS("One or more plugins asked to wait and no plugin rejected pod", "pod", klog.KObj(pod))
+		logger.V(4).Info("One or more plugins asked to wait and no plugin rejected pod", "pod", klog.KObj(pod))
 		return framework.NewStatus(framework.Wait, msg)
 	}
 	return nil
@@ -1253,7 +1390,9 @@ func (f *frameworkImpl) WaitOnPermit(ctx context.Context, pod *v1.Pod) *framewor
 		return nil
 	}
 	defer f.waitingPods.remove(pod.UID)
-	klog.V(4).InfoS("Pod waiting on permit", "pod", klog.KObj(pod))
+
+	logger := klog.FromContext(ctx)
+	logger.V(4).Info("Pod waiting on permit", "pod", klog.KObj(pod))
 
 	startTime := time.Now()
 	s := <-waitingPod.s
@@ -1261,11 +1400,11 @@ func (f *frameworkImpl) WaitOnPermit(ctx context.Context, pod *v1.Pod) *framewor
 
 	if !s.IsSuccess() {
 		if s.IsUnschedulable() {
-			klog.V(4).InfoS("Pod rejected while waiting on permit", "pod", klog.KObj(pod), "status", s.Message())
+			logger.V(4).Info("Pod rejected while waiting on permit", "pod", klog.KObj(pod), "status", s.Message())
 			return s
 		}
 		err := s.AsError()
-		klog.ErrorS(err, "Failed waiting on permit for pod", "pod", klog.KObj(pod))
+		logger.Error(err, "Failed waiting on permit for pod", "pod", klog.KObj(pod))
 		return framework.AsStatus(fmt.Errorf("waiting on permit for pod: %w", err)).WithFailedPlugin(s.FailedPlugin())
 	}
 	return nil
@@ -1362,8 +1501,8 @@ func (f *frameworkImpl) SharedInformerFactory() informers.SharedInformerFactory
 	return f.informerFactory
 }
 
-func (f *frameworkImpl) pluginsNeeded(plugins *config.Plugins) sets.String {
-	pgSet := sets.String{}
+func (f *frameworkImpl) pluginsNeeded(plugins *config.Plugins) sets.Set[string] {
+	pgSet := sets.Set[string]{}
 
 	if plugins == nil {
 		return pgSet
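All of the framework hunks above apply one contextual-logging pattern: take the logger out of the context, name it after the extension point, attach the shared pod/node key-values once, then re-shadow both the logger and the context per plugin so anything the plugin logs via klog.FromContext carries the full chain. A minimal, self-contained sketch of that pattern follows; the extension-point label, plugin names, and log message are illustrative, not taken from this diff.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func runExtensionPoint(ctx context.Context, pluginNames []string) {
	// Derive one named logger per extension point and attach shared keys once.
	logger := klog.FromContext(ctx)
	logger = klog.LoggerWithName(logger, "PreScore")
	logger = klog.LoggerWithValues(logger, "pod", "default/nginx") // value shape illustrative
	for _, name := range pluginNames {
		// Shadow per plugin: the plugin name is appended to the logger name,
		// and the enriched logger is stored back into a per-iteration ctx.
		logger := klog.LoggerWithName(logger, name)
		ctx := klog.NewContext(ctx, logger)
		// A real runXxxPlugin(ctx, ...) call would go here; any callee doing
		// klog.FromContext(ctx) now logs as PreScore/<plugin> with the shared keys.
		klog.FromContext(ctx).V(5).Info("running plugin")
	}
}

func main() {
	runExtensionPoint(context.Background(), []string{"NodeAffinity", "TaintToleration"})
}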
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go
new file mode 100644
index 000000000000..152d6788a9e1
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"context"
+
+	v1 "k8s.io/api/core/v1"
+	compbasemetrics "k8s.io/component-base/metrics"
+	"k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+type instrumentedFilterPlugin struct {
+	framework.FilterPlugin
+
+	metric compbasemetrics.CounterMetric
+}
+
+var _ framework.FilterPlugin = &instrumentedFilterPlugin{}
+
+func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
+	p.metric.Inc()
+	return p.FilterPlugin.Filter(ctx, state, pod, nodeInfo)
+}
+
+type instrumentedPreFilterPlugin struct {
+	framework.PreFilterPlugin
+
+	metric compbasemetrics.CounterMetric
+}
+
+var _ framework.PreFilterPlugin = &instrumentedPreFilterPlugin{}
+
+func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
+	result, status := p.PreFilterPlugin.PreFilter(ctx, state, pod)
+	if !status.IsSkip() {
+		p.metric.Inc()
+	}
+	return result, status
+}
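The new file is a plain embed-and-delegate decorator: each wrapper satisfies the plugin interface by embedding it, bumps a counter, and forwards the call (skipping the count when PreFilter returns a Skip status). A hedged usage sketch, written as if it sat in the same runtime package; the stub plugin and counter below are invented for illustration and only instrumentedFilterPlugin comes from the file above.

package runtime // would need to live alongside the vendored file above

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// countingMetric is a minimal stand-in satisfying compbasemetrics.CounterMetric.
type countingMetric struct{ n int }

func (c *countingMetric) Inc() { c.n++ }

// nopFilter is a stand-in FilterPlugin that always succeeds.
type nopFilter struct{}

func (nopFilter) Name() string { return "NopFilter" }
func (nopFilter) Filter(context.Context, *framework.CycleState, *v1.Pod, *framework.NodeInfo) *framework.Status {
	return nil // a nil *Status counts as Success in the framework
}

func TestInstrumentedFilterCounts(t *testing.T) {
	m := &countingMetric{}
	var pl framework.FilterPlugin = &instrumentedFilterPlugin{FilterPlugin: nopFilter{}, metric: m}
	_ = pl.Filter(context.Background(), nil, &v1.Pod{}, nil)
	if m.n != 1 {
		t.Fatalf("expected the Filter call to be counted once, got %d", m.n)
	}
}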
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go
index fda3ced88af3..6f9e9b295daf 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go
@@ -108,7 +108,7 @@ type QueuedPodInfo struct {
 	// latency for a pod.
 	InitialAttemptTimestamp time.Time
 	// If a Pod failed in a scheduling cycle, record the plugin names it failed by.
-	UnschedulablePlugins sets.String
+	UnschedulablePlugins sets.Set[string]
 	// Whether the Pod is scheduling gated (by PreEnqueuePlugins) or not.
 	Gated bool
 }
@@ -197,7 +197,7 @@ func (pi *PodInfo) Update(pod *v1.Pod) error {
 
 // AffinityTerm is a processed version of v1.PodAffinityTerm.
 type AffinityTerm struct {
-	Namespaces        sets.String
+	Namespaces        sets.Set[string]
 	Selector          labels.Selector
 	TopologyKey       string
 	NamespaceSelector labels.Selector
@@ -220,7 +220,7 @@ type WeightedAffinityTerm struct {
 // Diagnosis records the details to diagnose a scheduling failure.
 type Diagnosis struct {
 	NodeToStatusMap      NodeToStatusMap
-	UnschedulablePlugins sets.String
+	UnschedulablePlugins sets.Set[string]
 	// PreFilterMsg records the messages returned from PreFilter plugins.
 	PreFilterMsg string
 	// PostFilterMsg records the messages returned from PostFilter plugins.
@@ -364,8 +364,8 @@ func getPodAntiAffinityTerms(affinity *v1.Affinity) (terms []v1.PodAffinityTerm)
 // returns a set of names according to the namespaces indicated in podAffinityTerm.
 // If namespaces is empty it considers the given pod's namespace.
-func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
-	names := sets.String{}
+func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.Set[string] {
+	names := sets.Set[string]{}
 	if len(podAffinityTerm.Namespaces) == 0 && podAffinityTerm.NamespaceSelector == nil {
 		names.Insert(pod.Namespace)
 	} else {
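The sets.String → sets.Set[string] churn in these files is the apimachinery move to generic sets: constructors become sets.New[string] (with element-type inference when arguments are given, as in sets.New(node.Name) below), the zero value sets.Set[string]{} is usable directly, and sorted listing moves to the package-level sets.List. A small sketch, assuming k8s.io/apimachinery v0.26 or newer:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	s := sets.New[string]("b", "a") // replaces sets.NewString("b", "a")
	s.Insert("c")                   // Insert/Has/Delete keep their old shape
	fmt.Println(s.Has("a"))         // true
	fmt.Println(sets.List(s))       // [a b c]; sorted, replaces s.List()

	names := sets.New("node-1") // element type inferred from the argument
	names.Insert("node-2")
	fmt.Println(names.Len()) // 2
}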
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
index 0b60a82f9c2a..4e94b4b3baaf 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
@@ -17,6 +17,7 @@ limitations under the License.
 package cache
 
 import (
+	"context"
 	"fmt"
 	"sync"
 	"time"
@@ -35,11 +36,12 @@ var (
 
 // New returns a Cache implementation.
 // It automatically starts a go routine that manages expiration of assumed pods.
-// "ttl" is how long the assumed pod will get expired, "0" means pod will never expire.
-// "stop" is the channel that would close the background goroutine.
-func New(ttl time.Duration, stop <-chan struct{}) Cache {
-	cache := newCache(ttl, cleanAssumedPeriod, stop)
-	cache.run()
+// "ttl" is how long the assumed pod will get expired.
+// "ctx" is the context that would close the background goroutine.
+func New(ctx context.Context, ttl time.Duration) Cache {
+	logger := klog.FromContext(ctx)
+	cache := newCache(ctx, ttl, cleanAssumedPeriod)
+	cache.run(logger)
 	return cache
 }
 
@@ -61,7 +63,7 @@ type cacheImpl struct {
 	mu sync.RWMutex
 	// a set of assumed pod keys.
 	// The key could further be used to get an entry in podStates.
-	assumedPods sets.String
+	assumedPods sets.Set[string]
 	// a map from pod key to podState.
 	podStates map[string]*podState
 	nodes     map[string]*nodeInfoListItem
@@ -86,7 +88,7 @@ type imageState struct {
 	// Size of the image
 	size int64
 	// A set of node names for nodes having this image present
-	nodes sets.String
+	nodes sets.Set[string]
 }
 
 // createImageStateSummary returns a summarizing snapshot of the given image's state.
@@ -97,15 +99,16 @@ func (cache *cacheImpl) createImageStateSummary(state *imageState) *framework.Im
 	}
 }
 
-func newCache(ttl, period time.Duration, stop <-chan struct{}) *cacheImpl {
+func newCache(ctx context.Context, ttl, period time.Duration) *cacheImpl {
+	logger := klog.FromContext(ctx)
 	return &cacheImpl{
 		ttl:    ttl,
 		period: period,
-		stop:   stop,
+		stop:   ctx.Done(),
 
 		nodes:       make(map[string]*nodeInfoListItem),
-		nodeTree:    newNodeTree(nil),
-		assumedPods: make(sets.String),
+		nodeTree:    newNodeTree(logger, nil),
+		assumedPods: sets.New[string](),
 		podStates:   make(map[string]*podState),
 		imageStates: make(map[string]*imageState),
 	}
@@ -121,10 +124,10 @@ func newNodeInfoListItem(ni *framework.NodeInfo) *nodeInfoListItem {
 // moveNodeInfoToHead moves a NodeInfo to the head of "cache.nodes" doubly
 // linked list. The head is the most recently updated NodeInfo.
 // We assume cache lock is already acquired.
-func (cache *cacheImpl) moveNodeInfoToHead(name string) {
+func (cache *cacheImpl) moveNodeInfoToHead(logger klog.Logger, name string) {
 	ni, ok := cache.nodes[name]
 	if !ok {
-		klog.ErrorS(nil, "No node info with given name found in the cache", "node", klog.KRef("", name))
+		logger.Error(nil, "No node info with given name found in the cache", "node", klog.KRef("", name))
 		return
 	}
 	// if the node info list item is already at the head, we are done.
@@ -149,10 +152,10 @@ func (cache *cacheImpl) moveNodeInfoToHead(name string) {
 // removeNodeInfoFromList removes a NodeInfo from the "cache.nodes" doubly
 // linked list.
 // We assume cache lock is already acquired.
-func (cache *cacheImpl) removeNodeInfoFromList(name string) {
+func (cache *cacheImpl) removeNodeInfoFromList(logger klog.Logger, name string) {
 	ni, ok := cache.nodes[name]
 	if !ok {
-		klog.ErrorS(nil, "No node info with given name found in the cache", "node", klog.KRef("", name))
+		logger.Error(nil, "No node info with given name found in the cache", "node", klog.KRef("", name))
 		return
 	}
 
@@ -194,7 +197,7 @@ func (cache *cacheImpl) Dump() *Dump {
 // nodeInfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
 // This function tracks generation number of NodeInfo and updates only the
 // entries of an existing snapshot that have changed after the snapshot was taken.
-func (cache *cacheImpl) UpdateSnapshot(nodeSnapshot *Snapshot) error {
+func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapshot) error {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
 
@@ -271,7 +274,7 @@ func (cache *cacheImpl) UpdateSnapshot(nodeSnapshot *Snapshot) error {
 	}
 
 	if updateAllLists || updateNodesHavePodsWithAffinity || updateNodesHavePodsWithRequiredAntiAffinity || updateUsedPVCSet {
-		cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists)
+		cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, updateAllLists)
 	}
 
 	if len(nodeSnapshot.nodeInfoList) != cache.nodeTree.numNodes {
@@ -280,26 +283,26 @@ func (cache *cacheImpl) UpdateSnapshot(nodeSnapshot *Snapshot) error {
 			", trying to recover",
 			len(nodeSnapshot.nodeInfoList), cache.nodeTree.numNodes,
 			len(nodeSnapshot.nodeInfoMap), len(cache.nodes))
-		klog.ErrorS(nil, errMsg)
+		logger.Error(nil, errMsg)
 		// We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
 		// error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
-		cache.updateNodeInfoSnapshotList(nodeSnapshot, true)
+		cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true)
 		return fmt.Errorf(errMsg)
 	}
 
 	return nil
 }
 
-func (cache *cacheImpl) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
+func (cache *cacheImpl) updateNodeInfoSnapshotList(logger klog.Logger, snapshot *Snapshot, updateAll bool) {
 	snapshot.havePodsWithAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
 	snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
-	snapshot.usedPVCSet = sets.NewString()
+	snapshot.usedPVCSet = sets.New[string]()
 	if updateAll {
 		// Take a snapshot of the nodes order in the tree
 		snapshot.nodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
 		nodesList, err := cache.nodeTree.list()
 		if err != nil {
-			klog.ErrorS(err, "Error occurred while retrieving the list of names of the nodes from node tree")
+			logger.Error(err, "Error occurred while retrieving the list of names of the nodes from node tree")
 		}
 		for _, nodeName := range nodesList {
 			if nodeInfo := snapshot.nodeInfoMap[nodeName]; nodeInfo != nil {
@@ -314,7 +317,7 @@ func (cache *cacheImpl) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll
 					snapshot.usedPVCSet.Insert(key)
 				}
 			} else {
-				klog.ErrorS(nil, "Node exists in nodeTree but not in NodeInfoMap, this should not happen", "node", klog.KRef("", nodeName))
+				logger.Error(nil, "Node exists in nodeTree but not in NodeInfoMap, this should not happen", "node", klog.KRef("", nodeName))
 			}
 		}
 	} else {
@@ -369,7 +372,7 @@ func (cache *cacheImpl) PodCount() (int, error) {
 	return count, nil
 }
 
-func (cache *cacheImpl) AssumePod(pod *v1.Pod) error {
+func (cache *cacheImpl) AssumePod(logger klog.Logger, pod *v1.Pod) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -381,15 +384,15 @@ func (cache *cacheImpl) AssumePod(pod *v1.Pod) error {
 		return fmt.Errorf("pod %v(%v) is in the cache, so can't be assumed", key, klog.KObj(pod))
 	}
 
-	return cache.addPod(pod, true)
+	return cache.addPod(logger, pod, true)
 }
 
-func (cache *cacheImpl) FinishBinding(pod *v1.Pod) error {
-	return cache.finishBinding(pod, time.Now())
+func (cache *cacheImpl) FinishBinding(logger klog.Logger, pod *v1.Pod) error {
+	return cache.finishBinding(logger, pod, time.Now())
 }
 
 // finishBinding exists to make tests deterministic by injecting now as an argument
-func (cache *cacheImpl) finishBinding(pod *v1.Pod, now time.Time) error {
+func (cache *cacheImpl) finishBinding(logger klog.Logger, pod *v1.Pod, now time.Time) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -398,7 +401,7 @@ func (cache *cacheImpl) finishBinding(pod *v1.Pod, now time.Time) error {
 	cache.mu.RLock()
 	defer cache.mu.RUnlock()
 
-	klog.V(5).InfoS("Finished binding for pod, can be expired", "podKey", key, "pod", klog.KObj(pod))
+	logger.V(5).Info("Finished binding for pod, can be expired", "podKey", key, "pod", klog.KObj(pod))
 	currState, ok := cache.podStates[key]
 	if ok && cache.assumedPods.Has(key) {
 		if cache.ttl == time.Duration(0) {
@@ -412,7 +415,7 @@ func (cache *cacheImpl) finishBinding(pod *v1.Pod, now time.Time) error {
 	return nil
 }
 
-func (cache *cacheImpl) ForgetPod(pod *v1.Pod) error {
+func (cache *cacheImpl) ForgetPod(logger klog.Logger, pod *v1.Pod) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -428,13 +431,13 @@ func (cache *cacheImpl) ForgetPod(pod *v1.Pod) error {
 	// Only assumed pod can be forgotten.
 	if ok && cache.assumedPods.Has(key) {
-		return cache.removePod(pod)
+		return cache.removePod(logger, pod)
 	}
 	return fmt.Errorf("pod %v(%v) wasn't assumed so cannot be forgotten", key, klog.KObj(pod))
 }
 
 // Assumes that lock is already acquired.
-func (cache *cacheImpl) addPod(pod *v1.Pod, assumePod bool) error {
+func (cache *cacheImpl) addPod(logger klog.Logger, pod *v1.Pod, assumePod bool) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -445,7 +448,7 @@ func (cache *cacheImpl) addPod(pod *v1.Pod, assumePod bool) error {
 		cache.nodes[pod.Spec.NodeName] = n
 	}
 	n.info.AddPod(pod)
-	cache.moveNodeInfoToHead(pod.Spec.NodeName)
+	cache.moveNodeInfoToHead(logger, pod.Spec.NodeName)
 	ps := &podState{
 		pod: pod,
 	}
@@ -457,18 +460,18 @@ func (cache *cacheImpl) addPod(pod *v1.Pod, assumePod bool) error {
 }
 
 // Assumes that lock is already acquired.
-func (cache *cacheImpl) updatePod(oldPod, newPod *v1.Pod) error {
-	if err := cache.removePod(oldPod); err != nil {
+func (cache *cacheImpl) updatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error {
+	if err := cache.removePod(logger, oldPod); err != nil {
 		return err
 	}
-	return cache.addPod(newPod, false)
+	return cache.addPod(logger, newPod, false)
 }
 
 // Assumes that lock is already acquired.
 // Removes a pod from the cached node info. If the node information was already
 // removed and there are no more pods left in the node, cleans up the node from
 // the cache.
-func (cache *cacheImpl) removePod(pod *v1.Pod) error {
+func (cache *cacheImpl) removePod(logger klog.Logger, pod *v1.Pod) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -476,16 +479,15 @@ func (cache *cacheImpl) removePod(pod *v1.Pod) error {
 
 	n, ok := cache.nodes[pod.Spec.NodeName]
 	if !ok {
-		klog.ErrorS(nil, "Node not found when trying to remove pod", "node", klog.KRef("", pod.Spec.NodeName), "podKey", key, "pod", klog.KObj(pod))
-
+		logger.Error(nil, "Node not found when trying to remove pod", "node", klog.KRef("", pod.Spec.NodeName), "podKey", key, "pod", klog.KObj(pod))
 	} else {
 		if err := n.info.RemovePod(pod); err != nil {
 			return err
 		}
 		if len(n.info.Pods) == 0 && n.info.Node() == nil {
-			cache.removeNodeInfoFromList(pod.Spec.NodeName)
+			cache.removeNodeInfoFromList(logger, pod.Spec.NodeName)
 		} else {
-			cache.moveNodeInfoToHead(pod.Spec.NodeName)
+			cache.moveNodeInfoToHead(logger, pod.Spec.NodeName)
 		}
 	}
 
@@ -494,7 +496,7 @@ func (cache *cacheImpl) removePod(pod *v1.Pod) error {
 	return nil
 }
 
-func (cache *cacheImpl) AddPod(pod *v1.Pod) error {
+func (cache *cacheImpl) AddPod(logger klog.Logger, pod *v1.Pod) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -508,18 +510,18 @@ func (cache *cacheImpl) AddPod(pod *v1.Pod) error {
 	case ok && cache.assumedPods.Has(key):
 		// When assuming, we've already added the Pod to cache,
 		// Just update here to make sure the Pod's status is up-to-date.
-		if err = cache.updatePod(currState.pod, pod); err != nil {
-			klog.ErrorS(err, "Error occurred while updating pod")
+		if err = cache.updatePod(logger, currState.pod, pod); err != nil {
+			logger.Error(err, "Error occurred while updating pod")
 		}
 		if currState.pod.Spec.NodeName != pod.Spec.NodeName {
 			// The pod was added to a different node than it was assumed to.
-			klog.InfoS("Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName))
+			logger.Info("Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName))
 			return nil
 		}
 	case !ok:
 		// Pod was expired. We should add it back.
-		if err = cache.addPod(pod, false); err != nil {
-			klog.ErrorS(err, "Error occurred while adding pod")
+		if err = cache.addPod(logger, pod, false); err != nil {
+			logger.Error(err, "Error occurred while adding pod")
 		}
 	default:
 		return fmt.Errorf("pod %v(%v) was already in added state", key, klog.KObj(pod))
@@ -527,7 +529,7 @@ func (cache *cacheImpl) AddPod(pod *v1.Pod) error {
 	return nil
 }
 
-func (cache *cacheImpl) UpdatePod(oldPod, newPod *v1.Pod) error {
+func (cache *cacheImpl) UpdatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error {
 	key, err := framework.GetPodKey(oldPod)
 	if err != nil {
 		return err
@@ -548,14 +550,14 @@ func (cache *cacheImpl) UpdatePod(oldPod, newPod *v1.Pod) error {
 	}
 
 	if currState.pod.Spec.NodeName != newPod.Spec.NodeName {
-		klog.ErrorS(nil, "Pod updated on a different node than previously added to", "podKey", key, "pod", klog.KObj(oldPod))
-		klog.ErrorS(nil, "scheduler cache is corrupted and can badly affect scheduling decisions")
+		logger.Error(nil, "Pod updated on a different node than previously added to", "podKey", key, "pod", klog.KObj(oldPod))
+		logger.Error(nil, "scheduler cache is corrupted and can badly affect scheduling decisions")
 		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
 	}
-	return cache.updatePod(oldPod, newPod)
+	return cache.updatePod(logger, oldPod, newPod)
 }
 
-func (cache *cacheImpl) RemovePod(pod *v1.Pod) error {
+func (cache *cacheImpl) RemovePod(logger klog.Logger, pod *v1.Pod) error {
 	key, err := framework.GetPodKey(pod)
 	if err != nil {
 		return err
@@ -569,15 +571,15 @@ func (cache *cacheImpl) RemovePod(pod *v1.Pod) error {
 		return fmt.Errorf("pod %v(%v) is not found in scheduler cache, so cannot be removed from it", key, klog.KObj(pod))
 	}
 	if currState.pod.Spec.NodeName != pod.Spec.NodeName {
-		klog.ErrorS(nil, "Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName))
+		logger.Error(nil, "Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName))
 		if pod.Spec.NodeName != "" {
 			// An empty NodeName is possible when the scheduler misses a Delete
 			// event and it gets the last known state from the informer cache.
-			klog.ErrorS(nil, "scheduler cache is corrupted and can badly affect scheduling decisions")
+			logger.Error(nil, "scheduler cache is corrupted and can badly affect scheduling decisions")
 			klog.FlushAndExit(klog.ExitFlushTimeout, 1)
 		}
 	}
-	return cache.removePod(currState.pod)
+	return cache.removePod(logger, currState.pod)
 }
 
 func (cache *cacheImpl) IsAssumedPod(pod *v1.Pod) (bool, error) {
@@ -611,7 +613,7 @@ func (cache *cacheImpl) GetPod(pod *v1.Pod) (*v1.Pod, error) {
 	return podState.pod, nil
 }
 
-func (cache *cacheImpl) AddNode(node *v1.Node) *framework.NodeInfo {
+func (cache *cacheImpl) AddNode(logger klog.Logger, node *v1.Node) *framework.NodeInfo {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
 
@@ -622,15 +624,15 @@ func (cache *cacheImpl) AddNode(node *v1.Node) *framework.NodeInfo {
 	} else {
 		cache.removeNodeImageStates(n.info.Node())
 	}
-	cache.moveNodeInfoToHead(node.Name)
+	cache.moveNodeInfoToHead(logger, node.Name)
 
-	cache.nodeTree.addNode(node)
+	cache.nodeTree.addNode(logger, node)
 	cache.addNodeImageStates(node, n.info)
 	n.info.SetNode(node)
 	return n.info.Clone()
}
 
-func (cache *cacheImpl) UpdateNode(oldNode, newNode *v1.Node) *framework.NodeInfo {
+func (cache *cacheImpl) UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node) *framework.NodeInfo {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
 
@@ -638,13 +640,13 @@ func (cache *cacheImpl) UpdateNode(oldNode, newNode *v1.Node) *framework.NodeInf
 	if !ok {
 		n = newNodeInfoListItem(framework.NewNodeInfo())
 		cache.nodes[newNode.Name] = n
-		cache.nodeTree.addNode(newNode)
+		cache.nodeTree.addNode(logger, newNode)
 	} else {
 		cache.removeNodeImageStates(n.info.Node())
 	}
-	cache.moveNodeInfoToHead(newNode.Name)
+	cache.moveNodeInfoToHead(logger, newNode.Name)
 
-	cache.nodeTree.updateNode(oldNode, newNode)
+	cache.nodeTree.updateNode(logger, oldNode, newNode)
 	cache.addNodeImageStates(newNode, n.info)
 	n.info.SetNode(newNode)
 	return n.info.Clone()
@@ -656,7 +658,7 @@ func (cache *cacheImpl) UpdateNode(oldNode, newNode *v1.Node) *framework.NodeInf
 // the source of truth.
 // However, we keep a ghost node with the list of pods until all pod deletion
 // events have arrived. A ghost node is skipped from snapshots.
-func (cache *cacheImpl) RemoveNode(node *v1.Node) error {
+func (cache *cacheImpl) RemoveNode(logger klog.Logger, node *v1.Node) error {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
 
@@ -670,11 +672,11 @@ func (cache *cacheImpl) RemoveNode(node *v1.Node) error {
 	// in a different watch, and thus can potentially be observed later, even though
 	// they happened before node removal.
 	if len(n.info.Pods) == 0 {
-		cache.removeNodeInfoFromList(node.Name)
+		cache.removeNodeInfoFromList(logger, node.Name)
 	} else {
-		cache.moveNodeInfoToHead(node.Name)
+		cache.moveNodeInfoToHead(logger, node.Name)
 	}
-	if err := cache.nodeTree.removeNode(node); err != nil {
+	if err := cache.nodeTree.removeNode(logger, node); err != nil {
 		return err
 	}
 	cache.removeNodeImageStates(node)
@@ -693,7 +695,7 @@ func (cache *cacheImpl) addNodeImageStates(node *v1.Node, nodeInfo *framework.No
 		if !ok {
 			state = &imageState{
 				size:  image.SizeBytes,
-				nodes: sets.NewString(node.Name),
+				nodes: sets.New(node.Name),
 			}
 			cache.imageStates[name] = state
 		} else {
@@ -732,17 +734,15 @@ func (cache *cacheImpl) removeNodeImageStates(node *v1.Node) {
 	}
 }
 
-func (cache *cacheImpl) run() {
-	go wait.Until(cache.cleanupExpiredAssumedPods, cache.period, cache.stop)
-}
-
-func (cache *cacheImpl) cleanupExpiredAssumedPods() {
-	cache.cleanupAssumedPods(time.Now())
+func (cache *cacheImpl) run(logger klog.Logger) {
+	go wait.Until(func() {
+		cache.cleanupAssumedPods(logger, time.Now())
+	}, cache.period, cache.stop)
 }
 
 // cleanupAssumedPods exists for making test deterministic by taking time as input argument.
 // It also reports metrics on the cache size for nodes, pods, and assumed pods.
-func (cache *cacheImpl) cleanupAssumedPods(now time.Time) {
+func (cache *cacheImpl) cleanupAssumedPods(logger klog.Logger, now time.Time) {
 	cache.mu.Lock()
 	defer cache.mu.Unlock()
 	defer cache.updateMetrics()
@@ -751,17 +751,17 @@ func (cache *cacheImpl) cleanupAssumedPods(now time.Time) {
 	for key := range cache.assumedPods {
 		ps, ok := cache.podStates[key]
 		if !ok {
-			klog.ErrorS(nil, "Key found in assumed set but not in podStates, potentially a logical error")
+			logger.Error(nil, "Key found in assumed set but not in podStates, potentially a logical error")
 			klog.FlushAndExit(klog.ExitFlushTimeout, 1)
 		}
 		if !ps.bindingFinished {
-			klog.V(5).InfoS("Could not expire cache for pod as binding is still in progress", "podKey", key, "pod", klog.KObj(ps.pod))
+			logger.V(5).Info("Could not expire cache for pod as binding is still in progress", "podKey", key, "pod", klog.KObj(ps.pod))
 			continue
 		}
 		if cache.ttl != 0 && now.After(*ps.deadline) {
-			klog.InfoS("Pod expired", "podKey", key, "pod", klog.KObj(ps.pod))
-			if err := cache.removePod(ps.pod); err != nil {
-				klog.ErrorS(err, "ExpirePod failed", "podKey", key, "pod", klog.KObj(ps.pod))
+			logger.Info("Pod expired", "podKey", key, "pod", klog.KObj(ps.pod))
+			if err := cache.removePod(logger, ps.pod); err != nil {
+				logger.Error(err, "ExpirePod failed", "podKey", key, "pod", klog.KObj(ps.pod))
 			}
 		}
 	}
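With this change the cache's lifetime is tied to a context: newCache stores ctx.Done() as its stop channel, so cancelling the context is what ends the wait.Until cleanup loop started by run. The package is internal to kube-scheduler, so the sketch below is shaped like in-tree wiring code rather than something an external module could import; the function name and ttl value are illustrative.

package scheduler // illustrative placement inside kube-scheduler

import (
	"context"
	"time"

	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

func newSchedulerCache(ctx context.Context) internalcache.Cache {
	// Before this change the call was internalcache.New(ttl, stopCh).
	// Now the assumed-pod cleanup goroutine stops when ctx is cancelled.
	return internalcache.New(ctx, 30*time.Second)
}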
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go
index bf8cafb7844d..bc7648dce6bf 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go
@@ -38,9 +38,9 @@ type CacheComparer struct {
 }
 
 // Compare compares the nodes and pods of NodeLister with Cache.Snapshot.
-func (c *CacheComparer) Compare() error {
-	klog.V(3).InfoS("Cache comparer started")
-	defer klog.V(3).InfoS("Cache comparer finished")
+func (c *CacheComparer) Compare(logger klog.Logger) error {
+	logger.V(3).Info("Cache comparer started")
+	defer logger.V(3).Info("Cache comparer finished")
 
 	nodes, err := c.NodeLister.List(labels.Everything())
 	if err != nil {
@@ -57,11 +57,11 @@ func (c *CacheComparer) Compare() error {
 	pendingPods, _ := c.PodQueue.PendingPods()
 
 	if missed, redundant := c.CompareNodes(nodes, dump.Nodes); len(missed)+len(redundant) != 0 {
-		klog.InfoS("Cache mismatch", "missedNodes", missed, "redundantNodes", redundant)
+		logger.Info("Cache mismatch", "missedNodes", missed, "redundantNodes", redundant)
 	}
 
 	if missed, redundant := c.ComparePods(pods, pendingPods, dump.Nodes); len(missed)+len(redundant) != 0 {
-		klog.InfoS("Cache mismatch", "missedPods", missed, "redundantPods", redundant)
+		logger.Info("Cache mismatch", "missedPods", missed, "redundantPods", redundant)
 	}
 
 	return nil
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go
index d8839ec67e83..8a1f80c88b42 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go
@@ -17,10 +17,12 @@ limitations under the License.
 package debugger
 
 import (
+	"context"
 	"os"
 	"os/signal"
 
 	corelisters "k8s.io/client-go/listers/core/v1"
+	"k8s.io/klog/v2"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 )
@@ -54,7 +56,9 @@ func New(
 
 // ListenForSignal starts a goroutine that will trigger the CacheDebugger's
 // behavior when the process receives SIGINT (Windows) or SIGUSER2 (non-Windows).
-func (d *CacheDebugger) ListenForSignal(stopCh <-chan struct{}) {
+func (d *CacheDebugger) ListenForSignal(ctx context.Context) {
+	logger := klog.FromContext(ctx)
+	stopCh := ctx.Done()
 	ch := make(chan os.Signal, 1)
 	signal.Notify(ch, compareSignal)
 
@@ -64,8 +68,8 @@ func (d *CacheDebugger) ListenForSignal(stopCh <-chan struct{}) {
 			case <-stopCh:
 				return
 			case <-ch:
-				d.Comparer.Compare()
-				d.Dumper.DumpAll()
+				d.Comparer.Compare(logger)
+				d.Dumper.DumpAll(logger)
 			}
 		}
 	}()
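ListenForSignal keeps its shape but now derives both the logger and the stop channel from a single context. compareSignal resolves to SIGUSR2 on non-Windows builds, so an operator can still trigger a cache compare-and-dump on a running scheduler with plain kill. A sketch of the same receive-loop pattern; the handler body and signal wiring here are illustrative stand-ins, not the debugger's code:

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func listenForDebugSignal(ctx context.Context) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGUSR2) // what compareSignal resolves to off-Windows
	go func() {
		for {
			select {
			case <-ctx.Done():
				return // context cancelled: stop listening
			case <-ch:
				// The real debugger calls Comparer.Compare(logger) and
				// Dumper.DumpAll(logger) here.
				fmt.Println("debug signal received")
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	listenForDebugSignal(ctx)
	select {} // block; `kill -USR2 <pid>` triggers the handler
}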
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go
index d95c234eed7c..618be63d989d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go
@@ -36,30 +36,30 @@ type CacheDumper struct {
 }
 
 // DumpAll writes cached nodes and scheduling queue information to the scheduler logs.
-func (d *CacheDumper) DumpAll() {
-	d.dumpNodes()
-	d.dumpSchedulingQueue()
+func (d *CacheDumper) DumpAll(logger klog.Logger) {
+	d.dumpNodes(logger)
+	d.dumpSchedulingQueue(logger)
 }
 
 // dumpNodes writes NodeInfo to the scheduler logs.
-func (d *CacheDumper) dumpNodes() {
+func (d *CacheDumper) dumpNodes(logger klog.Logger) {
 	dump := d.cache.Dump()
 	nodeInfos := make([]string, 0, len(dump.Nodes))
 	for name, nodeInfo := range dump.Nodes {
 		nodeInfos = append(nodeInfos, d.printNodeInfo(name, nodeInfo))
 	}
 	// Extra blank line added between node entries for readability.
-	klog.InfoS("Dump of cached NodeInfo", "nodes", strings.Join(nodeInfos, "\n\n"))
+	logger.Info("Dump of cached NodeInfo", "nodes", strings.Join(nodeInfos, "\n\n"))
 }
 
 // dumpSchedulingQueue writes pods in the scheduling queue to the scheduler logs.
-func (d *CacheDumper) dumpSchedulingQueue() {
+func (d *CacheDumper) dumpSchedulingQueue(logger klog.Logger) {
 	pendingPods, s := d.podQueue.PendingPods()
 	var podData strings.Builder
 	for _, p := range pendingPods {
 		podData.WriteString(printPod(p))
 	}
-	klog.InfoS("Dump of scheduling queue", "summary", s, "pods", podData.String())
+	logger.Info("Dump of scheduling queue", "summary", s, "pods", podData.String())
 }
 
 // printNodeInfo writes parts of NodeInfo to a string.
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go
index f6298bd346b3..24b7fd49066d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go
@@ -19,6 +19,7 @@ package cache
 import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
@@ -68,23 +69,23 @@ type Cache interface {
 	// AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
 	// The implementation also decides the policy to expire pod before being confirmed (receiving Add event).
 	// After expiration, its information would be subtracted.
-	AssumePod(pod *v1.Pod) error
+	AssumePod(logger klog.Logger, pod *v1.Pod) error
 
 	// FinishBinding signals that cache for assumed pod can be expired
-	FinishBinding(pod *v1.Pod) error
+	FinishBinding(logger klog.Logger, pod *v1.Pod) error
 
 	// ForgetPod removes an assumed pod from cache.
-	ForgetPod(pod *v1.Pod) error
+	ForgetPod(logger klog.Logger, pod *v1.Pod) error
 
 	// AddPod either confirms a pod if it's assumed, or adds it back if it's expired.
 	// If added back, the pod's information would be added again.
-	AddPod(pod *v1.Pod) error
+	AddPod(logger klog.Logger, pod *v1.Pod) error
 
 	// UpdatePod removes oldPod's information and adds newPod's information.
-	UpdatePod(oldPod, newPod *v1.Pod) error
+	UpdatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error
 
 	// RemovePod removes a pod. The pod's information would be subtracted from assigned node.
-	RemovePod(pod *v1.Pod) error
+	RemovePod(logger klog.Logger, pod *v1.Pod) error
 
 	// GetPod returns the pod from the cache with the same namespace and the
 	// same name of the specified pod.
@@ -95,21 +96,21 @@ type Cache interface {
 
 	// AddNode adds overall information about node.
 	// It returns a clone of added NodeInfo object.
-	AddNode(node *v1.Node) *framework.NodeInfo
+	AddNode(logger klog.Logger, node *v1.Node) *framework.NodeInfo
 
 	// UpdateNode updates overall information about node.
 	// It returns a clone of updated NodeInfo object.
-	UpdateNode(oldNode, newNode *v1.Node) *framework.NodeInfo
+	UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node) *framework.NodeInfo
 
 	// RemoveNode removes overall information about node.
-	RemoveNode(node *v1.Node) error
+	RemoveNode(logger klog.Logger, node *v1.Node) error
 
 	// UpdateSnapshot updates the passed infoSnapshot to the current contents of Cache.
 	// The node info contains aggregated information of pods scheduled (including assumed to be)
 	// on this node.
 	// The snapshot only includes Nodes that are not deleted at the time this function is called.
 	// nodeinfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
-	UpdateSnapshot(nodeSnapshot *Snapshot) error
+	UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapshot) error
 
 	// Dump produces a dump of the current cache.
 	Dump() *Dump
@@ -117,6 +118,6 @@ type Cache interface {
 
 // Dump is a dump of the cache state.
 type Dump struct {
-	AssumedPods sets.String
+	AssumedPods sets.Set[string]
 	Nodes       map[string]*framework.NodeInfo
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go
index 2463e3a95bd3..f344f8494fdc 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go
@@ -36,24 +36,24 @@ type nodeTree struct {
 }
 
 // newNodeTree creates a NodeTree from nodes.
-func newNodeTree(nodes []*v1.Node) *nodeTree {
+func newNodeTree(logger klog.Logger, nodes []*v1.Node) *nodeTree {
 	nt := &nodeTree{
 		tree: make(map[string][]string, len(nodes)),
 	}
 	for _, n := range nodes {
-		nt.addNode(n)
+		nt.addNode(logger, n)
 	}
 	return nt
 }
 
 // addNode adds a node and its corresponding zone to the tree. If the zone already exists, the node
 // is added to the array of nodes in that zone.
-func (nt *nodeTree) addNode(n *v1.Node) {
+func (nt *nodeTree) addNode(logger klog.Logger, n *v1.Node) {
 	zone := utilnode.GetZoneKey(n)
 	if na, ok := nt.tree[zone]; ok {
 		for _, nodeName := range na {
 			if nodeName == n.Name {
-				klog.InfoS("Node already exists in the NodeTree", "node", klog.KObj(n))
+				logger.Info("Did not add to the NodeTree because it already exists", "node", klog.KObj(n))
 				return
 			}
 		}
@@ -62,12 +62,12 @@ func (nt *nodeTree) addNode(n *v1.Node) {
 		nt.zones = append(nt.zones, zone)
 		nt.tree[zone] = []string{n.Name}
 	}
-	klog.V(2).InfoS("Added node in listed group to NodeTree", "node", klog.KObj(n), "zone", zone)
+	logger.V(2).Info("Added node in listed group to NodeTree", "node", klog.KObj(n), "zone", zone)
 	nt.numNodes++
 }
 
 // removeNode removes a node from the NodeTree.
-func (nt *nodeTree) removeNode(n *v1.Node) error {
+func (nt *nodeTree) removeNode(logger klog.Logger, n *v1.Node) error {
 	zone := utilnode.GetZoneKey(n)
 	if na, ok := nt.tree[zone]; ok {
 		for i, nodeName := range na {
@@ -76,13 +76,13 @@ func (nt *nodeTree) removeNode(n *v1.Node) error {
 				if len(nt.tree[zone]) == 0 {
 					nt.removeZone(zone)
 				}
-				klog.V(2).InfoS("Removed node in listed group from NodeTree", "node", klog.KObj(n), "zone", zone)
+				logger.V(2).Info("Removed node in listed group from NodeTree", "node", klog.KObj(n), "zone", zone)
 				nt.numNodes--
 				return nil
 			}
 		}
 	}
-	klog.ErrorS(nil, "Node in listed group was not found", "node", klog.KObj(n), "zone", zone)
+	logger.Error(nil, "Did not remove Node in NodeTree because it was not found", "node", klog.KObj(n), "zone", zone)
 	return fmt.Errorf("node %q in group %q was not found", n.Name, zone)
 }
 
@@ -99,7 +99,7 @@ func (nt *nodeTree) removeZone(zone string) {
 }
 
 // updateNode updates a node in the NodeTree.
-func (nt *nodeTree) updateNode(old, new *v1.Node) {
+func (nt *nodeTree) updateNode(logger klog.Logger, old, new *v1.Node) {
 	var oldZone string
 	if old != nil {
 		oldZone = utilnode.GetZoneKey(old)
@@ -110,8 +110,8 @@ func (nt *nodeTree) updateNode(old, new *v1.Node) {
 	if oldZone == newZone {
 		return
 	}
-	nt.removeNode(old) // No error checking. We ignore whether the old node exists or not.
-	nt.addNode(new)
+	nt.removeNode(logger, old) // No error checking. We ignore whether the old node exists or not.
+	nt.addNode(logger, new)
 }
 
 // list returns the list of names of the node. NodeTree iterates over zones and in each zone iterates
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/snapshot.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/snapshot.go
index 78b67322a41f..abd79312a2df 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/snapshot.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/snapshot.go
@@ -38,7 +38,7 @@ type Snapshot struct {
 	havePodsWithRequiredAntiAffinityNodeInfoList []*framework.NodeInfo
 	// usedPVCSet contains a set of PVC names that have one or more scheduled pods using them,
 	// keyed in the format "namespace/name".
-	usedPVCSet sets.String
+	usedPVCSet sets.Set[string]
 	generation int64
 }
 
@@ -48,7 +48,7 @@ var _ framework.SharedLister = &Snapshot{}
 func NewEmptySnapshot() *Snapshot {
 	return &Snapshot{
 		nodeInfoMap: make(map[string]*framework.NodeInfo),
-		usedPVCSet:  sets.NewString(),
+		usedPVCSet:  sets.New[string](),
 	}
 }
 
@@ -103,8 +103,8 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*framework.N
 	return nodeNameToInfo
 }
 
-func createUsedPVCSet(pods []*v1.Pod) sets.String {
-	usedPVCSet := sets.NewString()
+func createUsedPVCSet(pods []*v1.Pod) sets.Set[string] {
+	usedPVCSet := sets.New[string]()
 	for _, pod := range pods {
 		if pod.Spec.NodeName == "" {
 			continue
@@ -123,7 +123,7 @@ func createUsedPVCSet(pods []*v1.Pod) sets.String {
 }
 
 // getNodeImageStates returns the given node's image states based on the given imageExistence map.
-func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*framework.ImageStateSummary {
+func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.Set[string]) map[string]*framework.ImageStateSummary {
 	imageStates := make(map[string]*framework.ImageStateSummary)
 
 	for _, image := range node.Status.Images {
@@ -138,13 +138,13 @@ func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String)
 }
 
 // createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
-func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
-	imageExistenceMap := make(map[string]sets.String)
+func createImageExistenceMap(nodes []*v1.Node) map[string]sets.Set[string] {
+	imageExistenceMap := make(map[string]sets.Set[string])
 	for _, node := range nodes {
 		for _, image := range node.Status.Images {
 			for _, name := range image.Names {
 				if _, ok := imageExistenceMap[name]; !ok {
-					imageExistenceMap[name] = sets.NewString(node.Name)
+					imageExistenceMap[name] = sets.New(node.Name)
 				} else {
 					imageExistenceMap[name].Insert(node.Name)
 				}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go
index 50fc17d63cee..a209f0f29d69 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go
@@ -59,12 +59,9 @@ const (
 	// backoffQ or activeQ. If this value is empty, the default value (5min)
 	// will be used.
 	DefaultPodMaxInUnschedulablePodsDuration time.Duration = 5 * time.Minute
-
-	queueClosed = "scheduling queue is closed"
-
 	// Scheduling queue names
-	activeQName  = "Active"
-	backoffQName = "Backoff"
+	activeQ           = "Active"
+	backoffQ          = "Backoff"
 	unschedulablePods = "Unschedulable"
 	preEnqueue        = "PreEnqueue"
@@ -91,15 +88,15 @@ type PreEnqueueCheck func(pod *v1.Pod) bool
 // makes it easy to use those data structures as a SchedulingQueue.
 type SchedulingQueue interface {
 	framework.PodNominator
-	Add(pod *v1.Pod) error
+	Add(logger klog.Logger, pod *v1.Pod) error
 	// Activate moves the given pods to activeQ iff they're in unschedulablePods or backoffQ.
 	// The passed-in pods are originally compiled from plugins that want to activate Pods,
 	// by injecting the pods through a reserved CycleState struct (PodsToActivate).
-	Activate(pods map[string]*v1.Pod)
+	Activate(logger klog.Logger, pods map[string]*v1.Pod)
 	// AddUnschedulableIfNotPresent adds an unschedulable pod back to scheduling queue.
 	// The podSchedulingCycle represents the current scheduling cycle number which can be
 	// returned by calling SchedulingCycle().
-	AddUnschedulableIfNotPresent(pod *framework.QueuedPodInfo, podSchedulingCycle int64) error
+	AddUnschedulableIfNotPresent(logger klog.Logger, pod *framework.QueuedPodInfo, podSchedulingCycle int64) error
 	// SchedulingCycle returns the current number of scheduling cycle which is
 	// cached by scheduling queue. Normally, incrementing this number whenever
 	// a pod is popped (e.g. called Pop()) is enough.
@@ -107,17 +104,17 @@ type SchedulingQueue interface {
 	// Pop removes the head of the queue and returns it. It blocks if the
 	// queue is empty and waits until a new item is added to the queue.
 	Pop() (*framework.QueuedPodInfo, error)
-	Update(oldPod, newPod *v1.Pod) error
+	Update(logger klog.Logger, oldPod, newPod *v1.Pod) error
 	Delete(pod *v1.Pod) error
-	MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck)
-	AssignedPodAdded(pod *v1.Pod)
-	AssignedPodUpdated(pod *v1.Pod)
+	MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, preCheck PreEnqueueCheck)
+	AssignedPodAdded(logger klog.Logger, pod *v1.Pod)
+	AssignedPodUpdated(logger klog.Logger, pod *v1.Pod)
 	PendingPods() ([]*v1.Pod, string)
 	// Close closes the SchedulingQueue so that the goroutine which is
 	// waiting to pop items can exit gracefully.
 	Close()
 	// Run starts the goroutines managing the queue.
-	Run()
+	Run(logger klog.Logger)
 }
 
 // NewSchedulingQueue initializes a priority queue as a new scheduling queue.
@@ -174,7 +171,7 @@ type PriorityQueue struct {
 	// when we received move request.
 	moveRequestCycle int64
 
-	clusterEventMap map[framework.ClusterEvent]sets.String
+	clusterEventMap map[framework.ClusterEvent]sets.Set[string]
 	// preEnqueuePluginMap is keyed with profile name, valued with registered preEnqueue plugins.
 	preEnqueuePluginMap map[string][]framework.PreEnqueuePlugin
 
@@ -197,7 +194,7 @@ type priorityQueueOptions struct {
 	podLister                  listersv1.PodLister
 	metricsRecorder            metrics.MetricAsyncRecorder
 	pluginMetricsSamplePercent int
-	clusterEventMap            map[framework.ClusterEvent]sets.String
+	clusterEventMap            map[framework.ClusterEvent]sets.Set[string]
 	preEnqueuePluginMap        map[string][]framework.PreEnqueuePlugin
 }
 
@@ -233,7 +230,7 @@ func WithPodLister(pl listersv1.PodLister) Option {
 }
 
 // WithClusterEventMap sets clusterEventMap for PriorityQueue.
-func WithClusterEventMap(m map[framework.ClusterEvent]sets.String) Option {
+func WithClusterEventMap(m map[framework.ClusterEvent]sets.Set[string]) Option {
 	return func(o *priorityQueueOptions) {
 		o.clusterEventMap = m
 	}
@@ -283,7 +280,7 @@ func newQueuedPodInfoForLookup(pod *v1.Pod, plugins ...string) *framework.Queued
 	// and so we avoid creating a full PodInfo, which is expensive to instantiate frequently.
 	return &framework.QueuedPodInfo{
 		PodInfo:              &framework.PodInfo{Pod: pod},
-		UnschedulablePlugins: sets.NewString(plugins...),
+		UnschedulablePlugins: sets.New(plugins...),
 	}
 }
 
@@ -330,9 +327,13 @@ func NewPriorityQueue(
 }
 
 // Run starts the goroutine to pump from podBackoffQ to activeQ
-func (p *PriorityQueue) Run() {
-	go wait.Until(p.flushBackoffQCompleted, 1.0*time.Second, p.stop)
-	go wait.Until(p.flushUnschedulablePodsLeftover, 30*time.Second, p.stop)
+func (p *PriorityQueue) Run(logger klog.Logger) {
+	go wait.Until(func() {
+		p.flushBackoffQCompleted(logger)
+	}, 1.0*time.Second, p.stop)
+	go wait.Until(func() {
+		p.flushUnschedulablePodsLeftover(logger)
+	}, 30*time.Second, p.stop)
 }
 
 // runPreEnqueuePlugins iterates PreEnqueue function in each registered PreEnqueuePlugin.
@@ -341,6 +342,7 @@ func (p *PriorityQueue) Run() {
 // Note: we need to associate the failed plugin to `pInfo`, so that the pod can be moved back
 // to activeQ by related cluster event.
 func (p *PriorityQueue) runPreEnqueuePlugins(ctx context.Context, pInfo *framework.QueuedPodInfo) bool {
+	logger := klog.FromContext(ctx)
 	var s *framework.Status
 	pod := pInfo.Pod
 	startTime := time.Now()
@@ -357,9 +359,9 @@ func (p *PriorityQueue) runPreEnqueuePlugins(ctx context.Context, pInfo *framewo
 			pInfo.UnschedulablePlugins.Insert(pl.Name())
 			metrics.UnschedulableReason(pl.Name(), pod.Spec.SchedulerName).Inc()
 			if s.Code() == framework.Error {
-				klog.ErrorS(s.AsError(), "Unexpected error running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name())
+				logger.Error(s.AsError(), "Unexpected error running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name())
 			} else {
-				klog.V(5).InfoS("Status after running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", s)
+				logger.Info("Status after running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", s)
 			}
 			return false
 		}
@@ -379,7 +381,7 @@ func (p *PriorityQueue) runPreEnqueuePlugin(ctx context.Context, pl framework.Pr
 // addToActiveQ tries to add pod to active queue. It returns 2 parameters:
 // 1. a boolean flag to indicate whether the pod is added successfully.
 // 2. an error for the caller to act on.
-func (p *PriorityQueue) addToActiveQ(pInfo *framework.QueuedPodInfo) (bool, error) {
+func (p *PriorityQueue) addToActiveQ(logger klog.Logger, pInfo *framework.QueuedPodInfo) (bool, error) {
 	pInfo.Gated = !p.runPreEnqueuePlugins(context.Background(), pInfo)
 	if pInfo.Gated {
 		// Add the Pod to unschedulablePods if it's not passing PreEnqueuePlugins.
@@ -387,7 +389,7 @@ func (p *PriorityQueue) addToActiveQ(pInfo *framework.QueuedPodInfo) (bool, erro
 		return false, nil
 	}
 	if err := p.activeQ.Add(pInfo); err != nil {
-		klog.ErrorS(err, "Error adding pod to the active queue", "pod", klog.KObj(pInfo.Pod))
+		logger.Error(err, "Error adding pod to the active queue", "pod", klog.KObj(pInfo.Pod))
 		return false, err
 	}
 	return true, nil
@@ -395,39 +397,39 @@ func (p *PriorityQueue) addToActiveQ(pInfo *framework.QueuedPodInfo) (bool, erro
 
 // Add adds a pod to the active queue. It should be called only when a new pod
 // is added so there is no chance the pod is already in active/unschedulable/backoff queues
-func (p *PriorityQueue) Add(pod *v1.Pod) error {
+func (p *PriorityQueue) Add(logger klog.Logger, pod *v1.Pod) error {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
 	pInfo := p.newQueuedPodInfo(pod)
 	gated := pInfo.Gated
-	if added, err := p.addToActiveQ(pInfo); !added {
+	if added, err := p.addToActiveQ(logger, pInfo); !added {
 		return err
 	}
 	if p.unschedulablePods.get(pod) != nil {
-		klog.ErrorS(nil, "Error: pod is already in the unschedulable queue", "pod", klog.KObj(pod))
+		logger.Error(nil, "Error: pod is already in the unschedulable queue", "pod", klog.KObj(pod))
 		p.unschedulablePods.delete(pod, gated)
 	}
 	// Delete pod from backoffQ if it is backing off
 	if err := p.podBackoffQ.Delete(pInfo); err == nil {
-		klog.ErrorS(nil, "Error: pod is already in the podBackoff queue", "pod", klog.KObj(pod))
+		logger.Error(nil, "Error: pod is already in the podBackoff queue", "pod", klog.KObj(pod))
 	}
-	klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", PodAdd, "queue", activeQName)
+	logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", PodAdd, "queue", activeQ)
 	metrics.SchedulerQueueIncomingPods.WithLabelValues("active", PodAdd).Inc()
-	p.addNominatedPodUnlocked(pInfo.PodInfo, nil)
+	p.addNominatedPodUnlocked(logger, pInfo.PodInfo, nil)
 	p.cond.Broadcast()
 
 	return nil
 }
 
 // Activate moves the given pods to activeQ iff they're in unschedulablePods or backoffQ.
-func (p *PriorityQueue) Activate(pods map[string]*v1.Pod) {
+func (p *PriorityQueue) Activate(logger klog.Logger, pods map[string]*v1.Pod) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
 	activated := false
 	for _, pod := range pods {
-		if p.activate(pod) {
+		if p.activate(logger, pod) {
 			activated = true
 		}
 	}
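addToActiveQ above is where PreEnqueue gating lands: a pod failing any PreEnqueue plugin is marked Gated and parked in unschedulablePods instead of activeQ. For context, a hedged sketch of what such a plugin looks like against the framework API; the package, plugin name, and label check are invented for illustration:

package gateexample

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// labelGate keeps pods out of activeQ until they carry a marker label.
type labelGate struct{}

var _ framework.PreEnqueuePlugin = labelGate{}

func (labelGate) Name() string { return "LabelGate" } // illustrative name

// Returning nil (Success) lets the pod be queued; any other status leaves it
// gated in unschedulablePods, which is the path addToActiveQ takes above.
func (labelGate) PreEnqueue(_ context.Context, pod *v1.Pod) *framework.Status {
	if pod.Labels["example.com/ready"] != "true" {
		return framework.NewStatus(framework.UnschedulableAndUnresolvable, "waiting for readiness label")
	}
	return nil
}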
if obj, exists, _ := p.podBackoffQ.Get(newQueuedPodInfoForLookup(pod)); !exists { - klog.ErrorS(nil, "To-activate pod does not exist in unschedulablePods or backoffQ", "pod", klog.KObj(pod)) + logger.Error(nil, "To-activate pod does not exist in unschedulablePods or backoffQ", "pod", klog.KObj(pod)) return false } else { pInfo = obj.(*framework.QueuedPodInfo) @@ -457,18 +459,18 @@ func (p *PriorityQueue) activate(pod *v1.Pod) bool { if pInfo == nil { // Redundant safe check. We shouldn't reach here. - klog.ErrorS(nil, "Internal error: cannot obtain pInfo") + logger.Error(nil, "Internal error: cannot obtain pInfo") return false } gated := pInfo.Gated - if added, _ := p.addToActiveQ(pInfo); !added { + if added, _ := p.addToActiveQ(logger, pInfo); !added { return false } p.unschedulablePods.delete(pInfo.Pod, gated) p.podBackoffQ.Delete(pInfo) metrics.SchedulerQueueIncomingPods.WithLabelValues("active", ForceActivate).Inc() - p.addNominatedPodUnlocked(pInfo.PodInfo, nil) + p.addNominatedPodUnlocked(logger, pInfo.PodInfo, nil) return true } @@ -493,7 +495,7 @@ func (p *PriorityQueue) SchedulingCycle() int64 { // the queue, unless it is already in the queue. Normally, PriorityQueue puts // unschedulable pods in `unschedulablePods`. But if there has been a recent move // request, then the pod is put in `podBackoffQ`. -func (p *PriorityQueue) AddUnschedulableIfNotPresent(pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error { +func (p *PriorityQueue) AddUnschedulableIfNotPresent(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error { p.lock.Lock() defer p.lock.Unlock() pod := pInfo.Pod @@ -520,21 +522,21 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pInfo *framework.QueuedPodI if err := p.podBackoffQ.Add(pInfo); err != nil { return fmt.Errorf("error adding pod %v to the backoff queue: %v", klog.KObj(pod), err) } - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", ScheduleAttemptFailure, "queue", backoffQName) + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", ScheduleAttemptFailure, "queue", backoffQ) metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", ScheduleAttemptFailure).Inc() } else { p.unschedulablePods.addOrUpdate(pInfo) - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", ScheduleAttemptFailure, "queue", unschedulablePods) + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", ScheduleAttemptFailure, "queue", unschedulablePods) metrics.SchedulerQueueIncomingPods.WithLabelValues("unschedulable", ScheduleAttemptFailure).Inc() } - p.addNominatedPodUnlocked(pInfo.PodInfo, nil) + p.addNominatedPodUnlocked(logger, pInfo.PodInfo, nil) return nil } // flushBackoffQCompleted Moves all pods from backoffQ which have completed backoff in to activeQ -func (p *PriorityQueue) flushBackoffQCompleted() { +func (p *PriorityQueue) flushBackoffQCompleted(logger klog.Logger) { p.lock.Lock() defer p.lock.Unlock() activated := false @@ -550,13 +552,11 @@ func (p *PriorityQueue) flushBackoffQCompleted() { } _, err := p.podBackoffQ.Pop() if err != nil { - klog.ErrorS(err, "Unable to pop pod from backoff queue despite backoff completion", "pod", klog.KObj(pod)) + logger.Error(err, "Unable to pop pod from backoff queue despite backoff completion", "pod", klog.KObj(pod)) break } - if err := p.activeQ.Add(pInfo); err != nil { - klog.ErrorS(err, "Error adding pod to the active queue", "pod", 
klog.KObj(pInfo.Pod)) - } else { - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", BackoffComplete, "queue", activeQName) + if added, _ := p.addToActiveQ(logger, pInfo); added { + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", BackoffComplete, "queue", activeQ) metrics.SchedulerQueueIncomingPods.WithLabelValues("active", BackoffComplete).Inc() activated = true } @@ -569,7 +569,7 @@ func (p *PriorityQueue) flushBackoffQCompleted() { // flushUnschedulablePodsLeftover moves pods which stay in unschedulablePods // longer than podMaxInUnschedulablePodsDuration to backoffQ or activeQ. -func (p *PriorityQueue) flushUnschedulablePodsLeftover() { +func (p *PriorityQueue) flushUnschedulablePodsLeftover(logger klog.Logger) { p.lock.Lock() defer p.lock.Unlock() @@ -583,7 +583,7 @@ func (p *PriorityQueue) flushUnschedulablePodsLeftover() { } if len(podsToMove) > 0 { - p.movePodsToActiveOrBackoffQueue(podsToMove, UnschedulableTimeout) + p.movePodsToActiveOrBackoffQueue(logger, podsToMove, UnschedulableTimeout) } } @@ -598,7 +598,8 @@ func (p *PriorityQueue) Pop() (*framework.QueuedPodInfo, error) { // When Close() is called, the p.closed is set and the condition is broadcast, // which causes this loop to continue and return from the Pop(). if p.closed { - return nil, fmt.Errorf(queueClosed) + klog.V(2).InfoS("Scheduling queue is closed") + return nil, nil } p.cond.Wait() } @@ -631,7 +632,7 @@ func isPodUpdated(oldPod, newPod *v1.Pod) bool { // the item from the unschedulable queue if pod is updated in a way that it may // become schedulable and adds the updated one to the active queue. // If pod is not present in any of the queues, it is added to the active queue. -func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { +func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) error { p.lock.Lock() defer p.lock.Unlock() @@ -640,14 +641,14 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { // If the pod is already in the active queue, just update it there. if oldPodInfo, exists, _ := p.activeQ.Get(oldPodInfo); exists { pInfo := updatePod(oldPodInfo, newPod) - p.updateNominatedPodUnlocked(oldPod, pInfo.PodInfo) + p.updateNominatedPodUnlocked(logger, oldPod, pInfo.PodInfo) return p.activeQ.Update(pInfo) } // If the pod is in the backoff queue, update it there. if oldPodInfo, exists, _ := p.podBackoffQ.Get(oldPodInfo); exists { pInfo := updatePod(oldPodInfo, newPod) - p.updateNominatedPodUnlocked(oldPod, pInfo.PodInfo) + p.updateNominatedPodUnlocked(logger, oldPod, pInfo.PodInfo) return p.podBackoffQ.Update(pInfo) } } @@ -655,7 +656,7 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { // If the pod is in the unschedulable queue, updating it may make it schedulable. 
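The common thread in these scheduling_queue.go hunks is the contextual-logging migration: methods stop calling the global klog.ErrorS/klog.V(n).InfoS helpers and instead log through a klog.Logger handed in by the caller. A minimal, self-contained sketch of that convention follows; the queue type and method here are illustrative stand-ins, not the scheduler's real API:

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

// fakeQueue stands in for a scheduler-style queue; only the logging
// pattern matters here, not the data structure.
type fakeQueue struct {
	items []string
}

// add mirrors the migrated style: the logger is passed in by the caller
// rather than pulled from the klog global, so callers can attach
// request- or component-scoped context to every log line.
func (q *fakeQueue) add(logger klog.Logger, item string) error {
	if item == "" {
		err := errors.New("empty item")
		logger.Error(err, "Refusing to queue item")
		return err
	}
	q.items = append(q.items, item)
	logger.V(5).Info("Item moved to an internal queue", "item", item, "queue", "active")
	return nil
}

func main() {
	defer klog.Flush()
	// klog.Background() returns the process-wide default Logger; a real
	// caller would usually use klog.FromContext(ctx) instead.
	logger := klog.Background()
	q := &fakeQueue{}
	_ = q.add(logger, "pod-a")
	_ = q.add(logger, "")
}
```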
if usPodInfo := p.unschedulablePods.get(newPod); usPodInfo != nil { pInfo := updatePod(usPodInfo, newPod) - p.updateNominatedPodUnlocked(oldPod, pInfo.PodInfo) + p.updateNominatedPodUnlocked(logger, oldPod, pInfo.PodInfo) if isPodUpdated(oldPod, newPod) { gated := usPodInfo.Gated if p.isPodBackingoff(usPodInfo) { @@ -663,13 +664,13 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { return err } p.unschedulablePods.delete(usPodInfo.Pod, gated) - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", PodUpdate, "queue", backoffQName) + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", PodUpdate, "queue", backoffQ) } else { - if added, err := p.addToActiveQ(pInfo); !added { + if added, err := p.addToActiveQ(logger, pInfo); !added { return err } p.unschedulablePods.delete(usPodInfo.Pod, gated) - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", BackoffComplete, "queue", activeQName) + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", BackoffComplete, "queue", activeQ) p.cond.Broadcast() } } else { @@ -681,11 +682,11 @@ func (p *PriorityQueue) Update(oldPod, newPod *v1.Pod) error { } // If pod is not in any of the queues, we put it in the active queue. pInfo := p.newQueuedPodInfo(newPod) - if added, err := p.addToActiveQ(pInfo); !added { + if added, err := p.addToActiveQ(logger, pInfo); !added { return err } - p.addNominatedPodUnlocked(pInfo.PodInfo, nil) - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", PodUpdate, "queue", activeQName) + p.addNominatedPodUnlocked(logger, pInfo.PodInfo, nil) + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", PodUpdate, "queue", activeQ) p.cond.Broadcast() return nil } @@ -709,9 +710,9 @@ func (p *PriorityQueue) Delete(pod *v1.Pod) error { // AssignedPodAdded is called when a bound pod is added. Creation of this pod // may make pending pods with matching affinity terms schedulable. -func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) { +func (p *PriorityQueue) AssignedPodAdded(logger klog.Logger, pod *v1.Pod) { p.lock.Lock() - p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodAdd) + p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodAdd) p.lock.Unlock() } @@ -731,12 +732,12 @@ func isPodResourcesResizedDown(pod *v1.Pod) bool { // AssignedPodUpdated is called when a bound pod is updated. Change of labels // may make pending pods with matching affinity terms schedulable. 
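Update() above routes a previously unschedulable pod to backoffQ or straight to activeQ depending on isPodBackingoff. The backoff window behind a check like that is typically an exponential function of the attempt count, capped at a maximum. A rough sketch under assumed defaults of a 1s initial step and a 10s cap (the real queue derives both from its configuration):

```go
package main

import (
	"fmt"
	"time"
)

// backoffDuration returns an exponentially growing backoff for the given
// attempt count, capped at maxBackoff: attempt 1 waits initial, attempt 2
// waits 2*initial, and so on.
func backoffDuration(attempts int, initial, maxBackoff time.Duration) time.Duration {
	d := initial
	for i := 1; i < attempts; i++ {
		d *= 2
		if d >= maxBackoff {
			return maxBackoff
		}
	}
	return d
}

// isBackingOff reports whether a pod that failed at lastFailure with the
// given attempt count is still inside its backoff window.
func isBackingOff(lastFailure time.Time, attempts int) bool {
	deadline := lastFailure.Add(backoffDuration(attempts, time.Second, 10*time.Second))
	return time.Now().Before(deadline)
}

func main() {
	for a := 1; a <= 6; a++ {
		fmt.Println(a, backoffDuration(a, time.Second, 10*time.Second))
	}
	fmt.Println(isBackingOff(time.Now().Add(-3*time.Second), 3))
}
```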
-func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) { +func (p *PriorityQueue) AssignedPodUpdated(logger klog.Logger, pod *v1.Pod) { p.lock.Lock() if isPodResourcesResizedDown(pod) { - p.moveAllToActiveOrBackoffQueue(AssignedPodUpdate, nil) + p.moveAllToActiveOrBackoffQueue(logger, AssignedPodUpdate, nil) } else { - p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodUpdate) + p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodUpdate) } p.lock.Unlock() } @@ -746,28 +747,28 @@ func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) { // This function adds all pods and then signals the condition variable to ensure that // if Pop() is waiting for an item, it receives the signal after all the pods are in the // queue and the head is the highest priority pod. -func (p *PriorityQueue) moveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) { +func (p *PriorityQueue) moveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, preCheck PreEnqueueCheck) { unschedulablePods := make([]*framework.QueuedPodInfo, 0, len(p.unschedulablePods.podInfoMap)) for _, pInfo := range p.unschedulablePods.podInfoMap { if preCheck == nil || preCheck(pInfo.Pod) { unschedulablePods = append(unschedulablePods, pInfo) } } - p.movePodsToActiveOrBackoffQueue(unschedulablePods, event) + p.movePodsToActiveOrBackoffQueue(logger, unschedulablePods, event) } // MoveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ. // This function adds all pods and then signals the condition variable to ensure that // if Pop() is waiting for an item, it receives the signal after all the pods are in the // queue and the head is the highest priority pod. -func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) { +func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, preCheck PreEnqueueCheck) { p.lock.Lock() defer p.lock.Unlock() - p.moveAllToActiveOrBackoffQueue(event, preCheck) + p.moveAllToActiveOrBackoffQueue(logger, event, preCheck) } // NOTE: this function assumes lock has been acquired in caller -func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(podInfoList []*framework.QueuedPodInfo, event framework.ClusterEvent) { +func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podInfoList []*framework.QueuedPodInfo, event framework.ClusterEvent) { activated := false for _, pInfo := range podInfoList { // If the event doesn't help making the Pod schedulable, continue. @@ -780,16 +781,16 @@ func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(podInfoList []*framework. 
pod := pInfo.Pod if p.isPodBackingoff(pInfo) { if err := p.podBackoffQ.Add(pInfo); err != nil { - klog.ErrorS(err, "Error adding pod to the backoff queue", "pod", klog.KObj(pod)) + logger.Error(err, "Error adding pod to the backoff queue", "pod", klog.KObj(pod)) } else { - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", backoffQName) + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", backoffQ) metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event.Label).Inc() p.unschedulablePods.delete(pod, pInfo.Gated) } } else { gated := pInfo.Gated - if added, _ := p.addToActiveQ(pInfo); added { - klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", activeQName) + if added, _ := p.addToActiveQ(logger, pInfo); added { + logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", activeQ) activated = true metrics.SchedulerQueueIncomingPods.WithLabelValues("active", event.Label).Inc() p.unschedulablePods.delete(pod, gated) @@ -866,9 +867,9 @@ func (npm *nominator) deleteNominatedPodIfExistsUnlocked(pod *v1.Pod) { // This is called during the preemption process after a node is nominated to run // the pod. We update the structure before sending a request to update the pod // object to avoid races with the following scheduling cycles. -func (npm *nominator) AddNominatedPod(pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { +func (npm *nominator) AddNominatedPod(logger klog.Logger, pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { npm.lock.Lock() - npm.addNominatedPodUnlocked(pi, nominatingInfo) + npm.addNominatedPodUnlocked(logger, pi, nominatingInfo) npm.lock.Unlock() } @@ -903,7 +904,7 @@ func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod, plugins ...string) *framew PodInfo: podInfo, Timestamp: now, InitialAttemptTimestamp: now, - UnschedulablePlugins: sets.NewString(plugins...), + UnschedulablePlugins: sets.New(plugins...), } } @@ -1021,7 +1022,7 @@ type nominator struct { lock sync.RWMutex } -func (npm *nominator) addNominatedPodUnlocked(pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { +func (npm *nominator) addNominatedPodUnlocked(logger klog.Logger, pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { // Always delete the pod if it already exists, to ensure we never store more than // one instance of the pod. npm.delete(pi.Pod) @@ -1040,11 +1041,11 @@ func (npm *nominator) addNominatedPodUnlocked(pi *framework.PodInfo, nominatingI // If the pod was removed or if it was already scheduled, don't nominate it. 
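Note that newQueuedPodInfo now seeds UnschedulablePlugins with the generic sets.New rather than the deprecated sets.NewString. A short usage sketch of the generic set API from k8s.io/apimachinery/pkg/util/sets:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.New infers the element type from its arguments; this is the
	// generic replacement for the deprecated sets.NewString constructor.
	plugins := sets.New("NodeAffinity", "TaintToleration")
	plugins.Insert("PodTopologySpread")

	fmt.Println(plugins.Has("NodeAffinity")) // true
	fmt.Println(plugins.Len())               // 3

	// Set operations carry over unchanged from the string-only API.
	other := sets.New[string]("TaintToleration")
	fmt.Println(plugins.Intersection(other).UnsortedList())
}
```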
updatedPod, err := npm.podLister.Pods(pi.Pod.Namespace).Get(pi.Pod.Name)
if err != nil {
- klog.V(4).InfoS("Pod doesn't exist in podLister, aborted adding it to the nominator", "pod", klog.KObj(pi.Pod))
+ logger.V(4).Info("Pod doesn't exist in podLister, aborted adding it to the nominator", "pod", klog.KObj(pi.Pod))
return
}
if updatedPod.Spec.NodeName != "" {
- klog.V(4).InfoS("Pod is already scheduled to a node, aborted adding it to the nominator", "pod", klog.KObj(pi.Pod), "node", updatedPod.Spec.NodeName)
+ logger.V(4).Info("Pod is already scheduled to a node, aborted adding it to the nominator", "pod", klog.KObj(pi.Pod), "node", updatedPod.Spec.NodeName)
return
}
}
@@ -1052,7 +1053,7 @@
npm.nominatedPodToNode[pi.Pod.UID] = nodeName
for _, npi := range npm.nominatedPods[nodeName] {
if npi.Pod.UID == pi.Pod.UID {
- klog.V(4).InfoS("Pod already exists in the nominator", "pod", klog.KObj(npi.Pod))
+ logger.V(4).Info("Pod already exists in the nominator", "pod", klog.KObj(npi.Pod))
return
}
}
@@ -1077,13 +1078,13 @@
}

// UpdateNominatedPod updates the <oldPod> with <newPod>.
-func (npm *nominator) UpdateNominatedPod(oldPod *v1.Pod, newPodInfo *framework.PodInfo) {
+func (npm *nominator) UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo *framework.PodInfo) {
npm.lock.Lock()
defer npm.lock.Unlock()
- npm.updateNominatedPodUnlocked(oldPod, newPodInfo)
+ npm.updateNominatedPodUnlocked(logger, oldPod, newPodInfo)
}

-func (npm *nominator) updateNominatedPodUnlocked(oldPod *v1.Pod, newPodInfo *framework.PodInfo) {
+func (npm *nominator) updateNominatedPodUnlocked(logger klog.Logger, oldPod *v1.Pod, newPodInfo *framework.PodInfo) {
// In some cases, an Update event with no "NominatedNode" present is received right
// after a node("NominatedNode") is reserved for this pod in memory.
// In this case, we need to keep reserving the NominatedNode when updating the pod pointer.
@@ -1104,7 +1105,7 @@
// We update irrespective of the nominatedNodeName changed or not, to ensure
// that pod pointer is updated.
npm.delete(oldPod)
- npm.addNominatedPodUnlocked(newPodInfo, nominatingInfo)
+ npm.addNominatedPodUnlocked(logger, newPodInfo, nominatingInfo)
}

// NewPodNominator creates a nominator as a backing of framework.PodNominator.
@@ -1124,17 +1125,18 @@ func newPodNominator(podLister listersv1.PodLister) *nominator { // MakeNextPodFunc returns a function to retrieve the next pod from a given // scheduling queue -func MakeNextPodFunc(queue SchedulingQueue) func() *framework.QueuedPodInfo { +func MakeNextPodFunc(logger klog.Logger, queue SchedulingQueue) func() *framework.QueuedPodInfo { return func() *framework.QueuedPodInfo { podInfo, err := queue.Pop() - if err == nil { - klog.V(4).InfoS("About to try and schedule pod", "pod", klog.KObj(podInfo.Pod)) + if err == nil && podInfo != nil { + logger.V(4).Info("About to try and schedule pod", "pod", klog.KObj(podInfo.Pod)) for plugin := range podInfo.UnschedulablePlugins { metrics.UnschedulableReason(plugin, podInfo.Pod.Spec.SchedulerName).Dec() } return podInfo + } else if err != nil { + logger.Error(err, "Error while retrieving next pod from scheduling queue") } - klog.ErrorS(err, "Error while retrieving next pod from scheduling queue") return nil } } @@ -1170,7 +1172,7 @@ func (p *PriorityQueue) podMatchesEvent(podInfo *framework.QueuedPodInfo, cluste return false } -func intersect(x, y sets.String) bool { +func intersect(x, y sets.Set[string]) bool { if len(x) > len(y) { x, y = y, x } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go index 5ee59a430f0c..c76e1a28d642 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go @@ -106,16 +106,6 @@ var ( Help: "Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulablePods that the scheduler attempted to schedule and failed; 'gated' is the number of unschedulable pods that the scheduler never attempted to schedule because they are gated.", StabilityLevel: metrics.STABLE, }, []string{"queue"}) - // SchedulerGoroutines isn't called in some parts where goroutines start. - // Goroutines metric replaces SchedulerGoroutines metric. Goroutine metric tracks all goroutines. - SchedulerGoroutines = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Subsystem: SchedulerSubsystem, - DeprecatedVersion: "1.26.0", - Name: "scheduler_goroutines", - Help: "Number of running goroutines split by the work they do such as binding. This metric is replaced by the \"goroutines\" metric.", - StabilityLevel: metrics.ALPHA, - }, []string{"work"}) Goroutines = metrics.NewGaugeVec( &metrics.GaugeOpts{ Subsystem: SchedulerSubsystem, @@ -220,7 +210,6 @@ var ( FrameworkExtensionPointDuration, PluginExecutionDuration, SchedulerQueueIncomingPods, - SchedulerGoroutines, Goroutines, PermitWaitDuration, CacheSize, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go index cf7fa4249c84..d8e36e9df0a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/profile_metrics.go @@ -19,27 +19,27 @@ package metrics // This file contains helpers for metrics that are associated to a profile. 
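Because Pop() now returns nil, nil once the queue is closed, MakeNextPodFunc can hand a nil *QueuedPodInfo to its caller, which must treat that as a shutdown signal rather than an error. A sketch of a consumer loop written against that contract (the types and names here are stand-ins):

```go
package main

import "fmt"

type queuedPodInfo struct{ name string }

// runLoop keeps pulling work until nextPod returns nil, which by the
// contract above means the underlying queue was closed, not that an
// error occurred.
func runLoop(nextPod func() *queuedPodInfo) {
	for {
		pi := nextPod()
		if pi == nil {
			fmt.Println("queue closed, stopping worker")
			return
		}
		fmt.Println("scheduling", pi.name)
	}
}

func main() {
	pending := []*queuedPodInfo{{name: "pod-a"}, {name: "pod-b"}}
	runLoop(func() *queuedPodInfo {
		if len(pending) == 0 {
			return nil // simulate a closed queue
		}
		pi := pending[0]
		pending = pending[1:]
		return pi
	})
}
```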
var ( - scheduledResult = "scheduled" - unschedulableResult = "unschedulable" - errorResult = "error" + ScheduledResult = "scheduled" + UnschedulableResult = "unschedulable" + ErrorResult = "error" ) // PodScheduled can records a successful scheduling attempt and the duration // since `start`. func PodScheduled(profile string, duration float64) { - observeScheduleAttemptAndLatency(scheduledResult, profile, duration) + observeScheduleAttemptAndLatency(ScheduledResult, profile, duration) } // PodUnschedulable can records a scheduling attempt for an unschedulable pod // and the duration since `start`. func PodUnschedulable(profile string, duration float64) { - observeScheduleAttemptAndLatency(unschedulableResult, profile, duration) + observeScheduleAttemptAndLatency(UnschedulableResult, profile, duration) } // PodScheduleError can records a scheduling attempt that had an error and the // duration since `start`. func PodScheduleError(profile string, duration float64) { - observeScheduleAttemptAndLatency(errorResult, profile, duration) + observeScheduleAttemptAndLatency(ErrorResult, profile, duration) } func observeScheduleAttemptAndLatency(result, profile string, duration float64) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go index b023b03f628d..3846d9720df9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/profile/profile.go @@ -18,6 +18,7 @@ limitations under the License. package profile import ( + "context" "errors" "fmt" @@ -34,24 +35,24 @@ import ( type RecorderFactory func(string) events.EventRecorder // newProfile builds a Profile for the given configuration. -func newProfile(cfg config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory, - stopCh <-chan struct{}, opts ...frameworkruntime.Option) (framework.Framework, error) { +func newProfile(ctx context.Context, cfg config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory, + opts ...frameworkruntime.Option) (framework.Framework, error) { recorder := recorderFact(cfg.SchedulerName) opts = append(opts, frameworkruntime.WithEventRecorder(recorder)) - return frameworkruntime.NewFramework(r, &cfg, stopCh, opts...) + return frameworkruntime.NewFramework(ctx, r, &cfg, opts...) } // Map holds frameworks indexed by scheduler name. type Map map[string]framework.Framework // NewMap builds the frameworks given by the configuration, indexed by name. -func NewMap(cfgs []config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory, - stopCh <-chan struct{}, opts ...frameworkruntime.Option) (Map, error) { +func NewMap(ctx context.Context, cfgs []config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory, + opts ...frameworkruntime.Option) (Map, error) { m := make(Map) v := cfgValidator{m: m} for _, cfg := range cfgs { - p, err := newProfile(cfg, r, recorderFact, stopCh, opts...) + p, err := newProfile(ctx, cfg, r, recorderFact, opts...) 
if err != nil { return nil, fmt.Errorf("creating profile for scheduler name %s: %v", cfg.SchedulerName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go index 90e2d30e9c99..aa4f19bbf9ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/schedule_one.go @@ -60,6 +60,7 @@ const ( // scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting. func (sched *Scheduler) scheduleOne(ctx context.Context) { + logger := klog.FromContext(ctx) podInfo := sched.NextPod() // pod could be nil when schedulerQueue is closed if podInfo == nil || podInfo.Pod == nil { @@ -70,14 +71,14 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) { if err != nil { // This shouldn't happen, because we only accept for scheduling the pods // which specify a scheduler name that matches one of the profiles. - klog.ErrorS(err, "Error occurred") + logger.Error(err, "Error occurred") return } - if sched.skipPodSchedule(fwk, pod) { + if sched.skipPodSchedule(ctx, fwk, pod) { return } - klog.V(3).InfoS("Attempting to schedule pod", "pod", klog.KObj(pod)) + logger.V(3).Info("Attempting to schedule pod", "pod", klog.KObj(pod)) // Synchronously attempt to find a fit for the pod. start := time.Now() @@ -102,8 +103,6 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) { bindingCycleCtx, cancel := context.WithCancel(ctx) defer cancel() - metrics.SchedulerGoroutines.WithLabelValues(metrics.Binding).Inc() - defer metrics.SchedulerGoroutines.WithLabelValues(metrics.Binding).Dec() metrics.Goroutines.WithLabelValues(metrics.Binding).Inc() defer metrics.Goroutines.WithLabelValues(metrics.Binding).Dec() @@ -125,6 +124,7 @@ func (sched *Scheduler) schedulingCycle( start time.Time, podsToActivate *framework.PodsToActivate, ) (ScheduleResult, *framework.QueuedPodInfo, *framework.Status) { + logger := klog.FromContext(ctx) pod := podInfo.Pod scheduleResult, err := sched.SchedulePod(ctx, fwk, state, pod) if err != nil { @@ -135,7 +135,7 @@ func (sched *Scheduler) schedulingCycle( fitError, ok := err.(*framework.FitError) if !ok { - klog.ErrorS(err, "Error selecting node for pod", "pod", klog.KObj(pod)) + logger.Error(err, "Error selecting node for pod", "pod", klog.KObj(pod)) return ScheduleResult{nominatingInfo: clearNominatedNode}, podInfo, framework.AsStatus(err) } @@ -145,7 +145,7 @@ func (sched *Scheduler) schedulingCycle( // into the resources that were preempted, but this is harmless. 
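In schedule_one.go below, scheduleOne and its helpers pull their logger out of the context with klog.FromContext(ctx). The producing side of that pattern is klog.NewContext, which attaches an enriched logger before the context is handed down; a compact sketch:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

func scheduleOne(ctx context.Context) {
	// FromContext falls back to the global logger if none was attached,
	// so this is safe even for callers that never used NewContext.
	logger := klog.FromContext(ctx)
	logger.V(3).Info("Attempting to schedule pod", "pod", "default/nginx")
}

func main() {
	defer klog.Flush()
	// Attach a logger carrying per-cycle key/value pairs; everything
	// logged via FromContext below inherits them automatically.
	logger := klog.Background().WithValues("component", "scheduler")
	ctx := klog.NewContext(context.Background(), logger)
	scheduleOne(ctx)
}
```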
if !fwk.HasPostFilterPlugins() { - klog.V(3).InfoS("No PostFilter plugins are registered, so no preemption will be performed") + logger.V(3).Info("No PostFilter plugins are registered, so no preemption will be performed") return ScheduleResult{}, podInfo, framework.NewStatus(framework.Unschedulable).WithError(err) } @@ -154,9 +154,9 @@ func (sched *Scheduler) schedulingCycle( msg := status.Message() fitError.Diagnosis.PostFilterMsg = msg if status.Code() == framework.Error { - klog.ErrorS(nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) + logger.Error(nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) } else { - klog.V(5).InfoS("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) + logger.V(5).Info("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) } var nominatingInfo *framework.NominatingInfo @@ -172,7 +172,7 @@ func (sched *Scheduler) schedulingCycle( assumedPodInfo := podInfo.DeepCopy() assumedPod := assumedPodInfo.Pod // assume modifies `assumedPod` by setting NodeName=scheduleResult.SuggestedHost - err = sched.assume(assumedPod, scheduleResult.SuggestedHost) + err = sched.assume(logger, assumedPod, scheduleResult.SuggestedHost) if err != nil { // This is most probably result of a BUG in retrying logic. // We report an error here so that pod scheduling can be retried. @@ -188,8 +188,8 @@ func (sched *Scheduler) schedulingCycle( if sts := fwk.RunReservePluginsReserve(ctx, state, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() { // trigger un-reserve to clean up state associated with the reserved Pod fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if forgetErr := sched.Cache.ForgetPod(assumedPod); forgetErr != nil { - klog.ErrorS(forgetErr, "Scheduler cache ForgetPod failed") + if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil { + logger.Error(forgetErr, "Scheduler cache ForgetPod failed") } return ScheduleResult{nominatingInfo: clearNominatedNode}, @@ -202,8 +202,8 @@ func (sched *Scheduler) schedulingCycle( if !runPermitStatus.IsWait() && !runPermitStatus.IsSuccess() { // trigger un-reserve to clean up state associated with the reserved Pod fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if forgetErr := sched.Cache.ForgetPod(assumedPod); forgetErr != nil { - klog.ErrorS(forgetErr, "Scheduler cache ForgetPod failed") + if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil { + logger.Error(forgetErr, "Scheduler cache ForgetPod failed") } return ScheduleResult{nominatingInfo: clearNominatedNode}, @@ -213,7 +213,7 @@ func (sched *Scheduler) schedulingCycle( // At the end of a successful scheduling cycle, pop and move up Pods if needed. if len(podsToActivate.Map) != 0 { - sched.SchedulingQueue.Activate(podsToActivate.Map) + sched.SchedulingQueue.Activate(logger, podsToActivate.Map) // Clear the entries after activation. podsToActivate.Map = make(map[string]*v1.Pod) } @@ -230,6 +230,7 @@ func (sched *Scheduler) bindingCycle( assumedPodInfo *framework.QueuedPodInfo, start time.Time, podsToActivate *framework.PodsToActivate) *framework.Status { + logger := klog.FromContext(ctx) assumedPod := assumedPodInfo.Pod @@ -249,7 +250,7 @@ func (sched *Scheduler) bindingCycle( } // Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2. 
- klog.V(2).InfoS("Successfully bound pod to node", "pod", klog.KObj(assumedPod), "node", scheduleResult.SuggestedHost, "evaluatedNodes", scheduleResult.EvaluatedNodes, "feasibleNodes", scheduleResult.FeasibleNodes) + logger.V(2).Info("Successfully bound pod to node", "pod", klog.KObj(assumedPod), "node", scheduleResult.SuggestedHost, "evaluatedNodes", scheduleResult.EvaluatedNodes, "feasibleNodes", scheduleResult.FeasibleNodes) metrics.PodScheduled(fwk.ProfileName(), metrics.SinceInSeconds(start)) metrics.PodSchedulingAttempts.Observe(float64(assumedPodInfo.Attempts)) metrics.PodSchedulingDuration.WithLabelValues(getAttemptsLabel(assumedPodInfo)).Observe(metrics.SinceInSeconds(assumedPodInfo.InitialAttemptTimestamp)) @@ -259,7 +260,7 @@ func (sched *Scheduler) bindingCycle( // At the end of a successful binding cycle, move up Pods if needed. if len(podsToActivate.Map) != 0 { - sched.SchedulingQueue.Activate(podsToActivate.Map) + sched.SchedulingQueue.Activate(logger, podsToActivate.Map) // Unlike the logic in schedulingCycle(), we don't bother deleting the entries // as `podsToActivate.Map` is no longer consumed. } @@ -275,12 +276,13 @@ func (sched *Scheduler) handleBindingCycleError( start time.Time, scheduleResult ScheduleResult, status *framework.Status) { + logger := klog.FromContext(ctx) assumedPod := podInfo.Pod // trigger un-reserve plugins to clean up state associated with the reserved Pod fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost) - if forgetErr := sched.Cache.ForgetPod(assumedPod); forgetErr != nil { - klog.ErrorS(forgetErr, "scheduler cache ForgetPod failed") + if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil { + logger.Error(forgetErr, "scheduler cache ForgetPod failed") } else { // "Forget"ing an assumed Pod in binding cycle should be treated as a PodDelete event, // as the assumed Pod had occupied a certain amount of resources in scheduler cache. @@ -289,11 +291,11 @@ func (sched *Scheduler) handleBindingCycleError( // It's intentional to "defer" this operation; otherwise MoveAllToActiveOrBackoffQueue() would // update `q.moveRequest` and thus move the assumed pod to backoffQ anyways. if status.IsUnschedulable() { - defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(internalqueue.AssignedPodDelete, func(pod *v1.Pod) bool { + defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, internalqueue.AssignedPodDelete, func(pod *v1.Pod) bool { return assumedPod.UID != pod.UID }) } else { - sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(internalqueue.AssignedPodDelete, nil) + sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, internalqueue.AssignedPodDelete, nil) } } @@ -309,11 +311,11 @@ func (sched *Scheduler) frameworkForPod(pod *v1.Pod) (framework.Framework, error } // skipPodSchedule returns true if we could skip scheduling the pod for specified cases. -func (sched *Scheduler) skipPodSchedule(fwk framework.Framework, pod *v1.Pod) bool { +func (sched *Scheduler) skipPodSchedule(ctx context.Context, fwk framework.Framework, pod *v1.Pod) bool { // Case 1: pod is being deleted. 
if pod.DeletionTimestamp != nil { fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) - klog.V(3).InfoS("Skip schedule deleting pod", "pod", klog.KObj(pod)) + klog.FromContext(ctx).V(3).Info("Skip schedule deleting pod", "pod", klog.KObj(pod)) return true } @@ -322,6 +324,7 @@ func (sched *Scheduler) skipPodSchedule(fwk framework.Framework, pod *v1.Pod) bo // during its previous scheduling cycle but before getting assumed. isAssumed, err := sched.Cache.IsAssumedPod(pod) if err != nil { + // TODO(91633): pass ctx into a revised HandleError utilruntime.HandleError(fmt.Errorf("failed to check whether pod %s/%s is assumed: %v", pod.Namespace, pod.Name, err)) return false } @@ -334,8 +337,7 @@ func (sched *Scheduler) skipPodSchedule(fwk framework.Framework, pod *v1.Pod) bo func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) { trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name}) defer trace.LogIfLong(100 * time.Millisecond) - - if err := sched.Cache.UpdateSnapshot(sched.nodeInfoSnapshot); err != nil { + if err := sched.Cache.UpdateSnapshot(klog.FromContext(ctx), sched.nodeInfoSnapshot); err != nil { return result, err } trace.Step("Snapshotting scheduler cache and node infos done") @@ -385,9 +387,10 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework // Filters the nodes to find the ones that fit the pod based on the framework // filter plugins and filter extenders. func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.Diagnosis, error) { + logger := klog.FromContext(ctx) diagnosis := framework.Diagnosis{ NodeToStatusMap: make(framework.NodeToStatusMap), - UnschedulablePlugins: sets.NewString(), + UnschedulablePlugins: sets.New[string](), } allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List() @@ -403,7 +406,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F // Record the messages from PreFilter in Diagnosis.PreFilterMsg. msg := s.Message() diagnosis.PreFilterMsg = msg - klog.V(5).InfoS("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) + logger.V(5).Info("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) // Status satisfying IsUnschedulable() gets injected into diagnosis.UnschedulablePlugins. if s.FailedPlugin() != "" { diagnosis.UnschedulablePlugins.Insert(s.FailedPlugin()) @@ -416,7 +419,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F if len(pod.Status.NominatedNodeName) > 0 { feasibleNodes, err := sched.evaluateNominatedNode(ctx, pod, fwk, state, diagnosis) if err != nil { - klog.ErrorS(err, "Evaluation failed on nominated node", "pod", klog.KObj(pod), "node", pod.Status.NominatedNodeName) + logger.Error(err, "Evaluation failed on nominated node", "pod", klog.KObj(pod), "node", pod.Status.NominatedNodeName) } // Nominated node passes all the filters, scheduler is good to assign this node to the pod. 
if len(feasibleNodes) != 0 { @@ -444,7 +447,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F return nil, diagnosis, err } - feasibleNodes, err = findNodesThatPassExtenders(sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatusMap) + feasibleNodes, err = findNodesThatPassExtenders(ctx, sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatusMap) if err != nil { return nil, diagnosis, err } @@ -463,7 +466,7 @@ func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, return nil, err } - feasibleNodes, err = findNodesThatPassExtenders(sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatusMap) + feasibleNodes, err = findNodesThatPassExtenders(ctx, sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatusMap) if err != nil { return nil, err } @@ -573,7 +576,8 @@ func (sched *Scheduler) numFeasibleNodesToFind(percentageOfNodesToScore *int32, return numNodes } -func findNodesThatPassExtenders(extenders []framework.Extender, pod *v1.Pod, feasibleNodes []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) { +func findNodesThatPassExtenders(ctx context.Context, extenders []framework.Extender, pod *v1.Pod, feasibleNodes []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) { + logger := klog.FromContext(ctx) // Extenders are called sequentially. // Nodes in original feasibleNodes can be excluded in one extender, and pass on to the next // extender in a decreasing manner. @@ -593,7 +597,7 @@ func findNodesThatPassExtenders(extenders []framework.Extender, pod *v1.Pod, fea feasibleList, failedMap, failedAndUnresolvableMap, err := extender.Filter(pod, feasibleNodes) if err != nil { if extender.IsIgnorable() { - klog.InfoS("Skipping extender as it returned error and has ignorable flag set", "extender", extender, "err", err) + logger.Info("Skipping extender as it returned error and has ignorable flag set", "extender", extender, "err", err) continue } return nil, err @@ -639,6 +643,7 @@ func prioritizeNodes( pod *v1.Pod, nodes []*v1.Node, ) ([]framework.NodePluginScores, error) { + logger := klog.FromContext(ctx) // If no priority configs are provided, then all nodes will have a score of one. // This is required to generate the priority list in the required format if len(extenders) == 0 && !fwk.HasScorePlugins() { @@ -665,11 +670,11 @@ func prioritizeNodes( } // Additional details logged at level 10 if enabled. 
- klogV := klog.V(10) - if klogV.Enabled() { + loggerVTen := logger.V(10) + if loggerVTen.Enabled() { for _, nodeScore := range nodesScores { for _, pluginScore := range nodeScore.Scores { - klogV.InfoS("Plugin scored node for pod", "pod", klog.KObj(pod), "plugin", pluginScore.Name, "node", nodeScore.Name, "score", pluginScore.Score) + loggerVTen.Info("Plugin scored node for pod", "pod", klog.KObj(pod), "plugin", pluginScore.Name, "node", nodeScore.Name, "score", pluginScore.Score) } } } @@ -686,17 +691,15 @@ func prioritizeNodes( } wg.Add(1) go func(extIndex int) { - metrics.SchedulerGoroutines.WithLabelValues(metrics.PrioritizingExtender).Inc() metrics.Goroutines.WithLabelValues(metrics.PrioritizingExtender).Inc() defer func() { - metrics.SchedulerGoroutines.WithLabelValues(metrics.PrioritizingExtender).Dec() metrics.Goroutines.WithLabelValues(metrics.PrioritizingExtender).Dec() wg.Done() }() prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes) if err != nil { // Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities - klog.V(5).InfoS("Failed to run extender's priority function. No score given by this extender.", "error", err, "pod", klog.KObj(pod), "extender", extenders[extIndex].Name()) + logger.V(5).Info("Failed to run extender's priority function. No score given by this extender.", "error", err, "pod", klog.KObj(pod), "extender", extenders[extIndex].Name()) return } mu.Lock() @@ -704,8 +707,8 @@ func prioritizeNodes( for i := range *prioritizedList { nodename := (*prioritizedList)[i].Host score := (*prioritizedList)[i].Score - if klogV.Enabled() { - klogV.InfoS("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", nodename, "score", score) + if loggerVTen.Enabled() { + loggerVTen.Info("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", nodename, "score", score) } // MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore, @@ -736,9 +739,9 @@ func prioritizeNodes( } } - if klogV.Enabled() { + if loggerVTen.Enabled() { for i := range nodesScores { - klogV.InfoS("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", nodesScores[i].Name, "score", nodesScores[i].TotalScore) + loggerVTen.Info("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", nodesScores[i].Name, "score", nodesScores[i].TotalScore) } } return nodesScores, nil @@ -771,15 +774,15 @@ func selectHost(nodeScores []framework.NodePluginScores) (string, error) { // assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous. // assume modifies `assumed`. -func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { +func (sched *Scheduler) assume(logger klog.Logger, assumed *v1.Pod, host string) error { // Optimistically assume that the binding will succeed and send it to apiserver // in the background. // If the binding fails, scheduler will release resources allocated to assumed pod // immediately. 
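Renaming klogV to loggerVTen keeps the same trick: cache the V(10) logger once so its Enabled() check can guard expensive log payloads. The guard generalizes to any costly formatting, as in this sketch:

```go
package main

import (
	"flag"
	"fmt"
	"strings"

	"k8s.io/klog/v2"
)

// expensiveDump stands in for something like rendering per-node scores;
// we only want to pay for it when the log line will actually be emitted.
func expensiveDump(scores map[string]int) string {
	var b strings.Builder
	for node, s := range scores {
		fmt.Fprintf(&b, "%s=%d ", node, s)
	}
	return b.String()
}

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "10") // enable verbosity 10 for the demo
	flag.Parse()
	defer klog.Flush()

	logger := klog.Background()
	scores := map[string]int{"node-a": 42, "node-b": 17}

	// Cache the verbosity check once, then guard the expensive work.
	loggerV := logger.V(10)
	if loggerV.Enabled() {
		loggerV.Info("Calculated node scores", "scores", expensiveDump(scores))
	}
}
```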
assumed.Spec.NodeName = host
- if err := sched.Cache.AssumePod(assumed); err != nil {
- klog.ErrorS(err, "Scheduler cache AssumePod failed")
+ if err := sched.Cache.AssumePod(logger, assumed); err != nil {
+ logger.Error(err, "Scheduler cache AssumePod failed")
return err
}
// if "assumed" is a nominated pod, we should remove it from internal cache
@@ -794,8 +797,9 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error {
// The precedence for binding is: (1) extenders and (2) framework plugins.
// We expect this to run asynchronously, so we handle binding metrics internally.
func (sched *Scheduler) bind(ctx context.Context, fwk framework.Framework, assumed *v1.Pod, targetNode string, state *framework.CycleState) (status *framework.Status) {
+ logger := klog.FromContext(ctx)
defer func() {
- sched.finishBinding(fwk, assumed, targetNode, status)
+ sched.finishBinding(logger, fwk, assumed, targetNode, status)
}()
bound, err := sched.extendersBinding(assumed, targetNode)
@@ -819,12 +823,12 @@ func (sched *Scheduler) extendersBinding(pod *v1.Pod, node string) (bool, error)
return false, nil
}
-func (sched *Scheduler) finishBinding(fwk framework.Framework, assumed *v1.Pod, targetNode string, status *framework.Status) {
- if finErr := sched.Cache.FinishBinding(assumed); finErr != nil {
- klog.ErrorS(finErr, "Scheduler cache FinishBinding failed")
+func (sched *Scheduler) finishBinding(logger klog.Logger, fwk framework.Framework, assumed *v1.Pod, targetNode string, status *framework.Status) {
+ if finErr := sched.Cache.FinishBinding(logger, assumed); finErr != nil {
+ logger.Error(finErr, "Scheduler cache FinishBinding failed")
}
if !status.IsSuccess() {
- klog.V(1).InfoS("Failed to bind pod", "pod", klog.KObj(assumed))
+ logger.V(1).Info("Failed to bind pod", "pod", klog.KObj(assumed))
return
}
@@ -843,6 +847,7 @@ func getAttemptsLabel(p *framework.QueuedPodInfo) string {
// handleSchedulingFailure records an event for the pod that indicates the
// pod has failed to schedule. Also, update the pod condition and nominated node name if set.
func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) {
+ logger := klog.FromContext(ctx)
reason := v1.PodReasonSchedulerError
if status.IsUnschedulable() {
reason = v1.PodReasonUnschedulable
@@ -860,13 +865,13 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo
errMsg := status.Message()
if err == ErrNoNodesAvailable {
- klog.V(2).InfoS("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod), "err", err)
+ logger.V(2).Info("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod))
} else if fitError, ok := err.(*framework.FitError); ok {
// Inject UnschedulablePlugins to PodInfo, which will be used later for moving Pods between queues efficiently.
podInfo.UnschedulablePlugins = fitError.Diagnosis.UnschedulablePlugins - klog.V(2).InfoS("Unable to schedule pod; no fit; waiting", "pod", klog.KObj(pod), "err", errMsg) + logger.V(2).Info("Unable to schedule pod; no fit; waiting", "pod", klog.KObj(pod), "err", errMsg) } else if apierrors.IsNotFound(err) { - klog.V(2).InfoS("Unable to schedule pod, possibly due to node not found; waiting", "pod", klog.KObj(pod), "err", errMsg) + logger.V(2).Info("Unable to schedule pod, possibly due to node not found; waiting", "pod", klog.KObj(pod), "err", errMsg) if errStatus, ok := err.(apierrors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { nodeName := errStatus.Status().Details.Name // when node is not found, We do not remove the node right away. Trying again to get @@ -874,32 +879,32 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo _, err := fwk.ClientSet().CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - if err := sched.Cache.RemoveNode(&node); err != nil { - klog.V(4).InfoS("Node is not found; failed to remove it from the cache", "node", node.Name) + if err := sched.Cache.RemoveNode(logger, &node); err != nil { + logger.V(4).Info("Node is not found; failed to remove it from the cache", "node", node.Name) } } } } else { - klog.ErrorS(err, "Error scheduling pod; retrying", "pod", klog.KObj(pod)) + logger.Error(err, "Error scheduling pod; retrying", "pod", klog.KObj(pod)) } // Check if the Pod exists in informer cache. podLister := fwk.SharedInformerFactory().Core().V1().Pods().Lister() cachedPod, e := podLister.Pods(pod.Namespace).Get(pod.Name) if e != nil { - klog.InfoS("Pod doesn't exist in informer cache", "pod", klog.KObj(pod), "err", e) + logger.Info("Pod doesn't exist in informer cache", "pod", klog.KObj(pod), "err", e) } else { // In the case of extender, the pod may have been bound successfully, but timed out returning its response to the scheduler. // It could result in the live version to carry .spec.nodeName, and that's inconsistent with the internal-queued version. if len(cachedPod.Spec.NodeName) != 0 { - klog.InfoS("Pod has been assigned to node. Abort adding it back to queue.", "pod", klog.KObj(pod), "node", cachedPod.Spec.NodeName) + logger.Info("Pod has been assigned to node. Abort adding it back to queue.", "pod", klog.KObj(pod), "node", cachedPod.Spec.NodeName) } else { // As is from SharedInformer, we need to do a DeepCopy() here. // ignore this err since apiserver doesn't properly validate affinity terms // and we can't fix the validation for backwards compatibility. podInfo.PodInfo, _ = framework.NewPodInfo(cachedPod.DeepCopy()) - if err := sched.SchedulingQueue.AddUnschedulableIfNotPresent(podInfo, sched.SchedulingQueue.SchedulingCycle()); err != nil { - klog.ErrorS(err, "Error occurred") + if err := sched.SchedulingQueue.AddUnschedulableIfNotPresent(logger, podInfo, sched.SchedulingQueue.SchedulingCycle()); err != nil { + logger.Error(err, "Error occurred") } } } @@ -909,7 +914,8 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo // and the time the scheduler receives a Pod Update for the nominated pod. // Here we check for nil only for tests. 
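handleSchedulingFailure classifies the failure by error identity and type: a sentinel comparison for ErrNoNodesAvailable, a type assertion for *framework.FitError, and apierrors.IsNotFound for API lookups. A stripped-down sketch of the same triage with stand-in error types (the real code branches to different log levels and requeue behavior):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the scheduler's real error types.
var errNoNodesAvailable = errors.New("no nodes available to schedule pods")

type fitError struct{ unschedulablePlugins []string }

func (e *fitError) Error() string { return "pod does not fit on any node" }

func classify(err error) string {
	switch {
	case errors.Is(err, errNoNodesAvailable):
		return "wait: no nodes registered"
	default:
		var fe *fitError
		if errors.As(err, &fe) {
			// The real code copies the diagnosis into the pod info here
			// so later queue moves can be filtered by plugin.
			return fmt.Sprintf("unschedulable, failed plugins: %v", fe.unschedulablePlugins)
		}
		return "retry: unexpected error"
	}
}

func main() {
	fmt.Println(classify(errNoNodesAvailable))
	fmt.Println(classify(&fitError{unschedulablePlugins: []string{"NodeResourcesFit"}}))
	fmt.Println(classify(errors.New("boom")))
}
```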
if sched.SchedulingQueue != nil { - sched.SchedulingQueue.AddNominatedPod(podInfo.PodInfo, nominatingInfo) + logger := klog.FromContext(ctx) + sched.SchedulingQueue.AddNominatedPod(logger, podInfo.PodInfo, nominatingInfo) } if err == nil { @@ -925,7 +931,7 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo Reason: reason, Message: errMsg, }, nominatingInfo); err != nil { - klog.ErrorS(err, "Error updating pod", "pod", klog.KObj(pod)) + klog.FromContext(ctx).Error(err, "Error updating pod", "pod", klog.KObj(pod)) } } @@ -940,7 +946,8 @@ func truncateMessage(message string) string { } func updatePod(ctx context.Context, client clientset.Interface, pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *framework.NominatingInfo) error { - klog.V(3).InfoS("Updating pod condition", "pod", klog.KObj(pod), "conditionType", condition.Type, "conditionStatus", condition.Status, "conditionReason", condition.Reason) + logger := klog.FromContext(ctx) + logger.V(3).Info("Updating pod condition", "pod", klog.KObj(pod), "conditionType", condition.Type, "conditionStatus", condition.Status, "conditionReason", condition.Reason) podStatusCopy := pod.Status.DeepCopy() // NominatedNodeName is updated only if we are trying to set it, and the value is // different from the existing one. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go index 540d46f038b2..406f2d965537 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go @@ -96,11 +96,16 @@ type Scheduler struct { percentageOfNodesToScore int32 nextStartNodeIndex int + + // logger *must* be initialized when creating a Scheduler, + // otherwise logging functions will access a nil sink and + // panic. 
+ logger klog.Logger } -func (s *Scheduler) applyDefaultHandlers() { - s.SchedulePod = s.schedulePod - s.FailureHandler = s.handleSchedulingFailure +func (sched *Scheduler) applyDefaultHandlers() { + sched.SchedulePod = sched.schedulePod + sched.FailureHandler = sched.handleSchedulingFailure } type schedulerOptions struct { @@ -239,17 +244,15 @@ var defaultSchedulerOptions = schedulerOptions{ } // New returns a Scheduler -func New(client clientset.Interface, +func New(ctx context.Context, + client clientset.Interface, informerFactory informers.SharedInformerFactory, dynInformerFactory dynamicinformer.DynamicSharedInformerFactory, recorderFactory profile.RecorderFactory, - stopCh <-chan struct{}, opts ...Option) (*Scheduler, error) { - stopEverything := stopCh - if stopEverything == nil { - stopEverything = wait.NeverStop - } + logger := klog.FromContext(ctx) + stopEverything := ctx.Done() options := defaultSchedulerOptions for _, opt := range opts { @@ -273,7 +276,7 @@ func New(client clientset.Interface, metrics.Register() - extenders, err := buildExtenders(options.extenders, options.profiles) + extenders, err := buildExtenders(logger, options.extenders, options.profiles) if err != nil { return nil, fmt.Errorf("couldn't build extenders: %w", err) } @@ -282,10 +285,10 @@ func New(client clientset.Interface, nodeLister := informerFactory.Core().V1().Nodes().Lister() snapshot := internalcache.NewEmptySnapshot() - clusterEventMap := make(map[framework.ClusterEvent]sets.String) - metricsRecorder := metrics.NewMetricsAsyncRecorder(1000, time.Second, stopCh) + clusterEventMap := make(map[framework.ClusterEvent]sets.Set[string]) + metricsRecorder := metrics.NewMetricsAsyncRecorder(1000, time.Second, stopEverything) - profiles, err := profile.NewMap(options.profiles, registry, recorderFactory, stopCh, + profiles, err := profile.NewMap(ctx, options.profiles, registry, recorderFactory, frameworkruntime.WithComponentConfigVersion(options.componentConfigVersion), frameworkruntime.WithClientSet(client), frameworkruntime.WithKubeConfig(options.kubeConfig), @@ -293,7 +296,6 @@ func New(client clientset.Interface, frameworkruntime.WithSnapshotSharedLister(snapshot), frameworkruntime.WithCaptureProfile(frameworkruntime.CaptureProfile(options.frameworkCapturer)), frameworkruntime.WithClusterEventMap(clusterEventMap), - frameworkruntime.WithClusterEventMap(clusterEventMap), frameworkruntime.WithParallelism(int(options.parallelism)), frameworkruntime.WithExtenders(extenders), frameworkruntime.WithMetricsRecorder(metricsRecorder), @@ -327,11 +329,11 @@ func New(client clientset.Interface, fwk.SetPodNominator(podQueue) } - schedulerCache := internalcache.New(durationToExpireAssumedPod, stopEverything) + schedulerCache := internalcache.New(ctx, durationToExpireAssumedPod) // Setup cache debugger. debugger := cachedebugger.New(nodeLister, podLister, schedulerCache, podQueue) - debugger.ListenForSignal(stopEverything) + debugger.ListenForSignal(ctx) sched := &Scheduler{ Cache: schedulerCache, @@ -339,10 +341,11 @@ func New(client clientset.Interface, nodeInfoSnapshot: snapshot, percentageOfNodesToScore: options.percentageOfNodesToScore, Extenders: extenders, - NextPod: internalqueue.MakeNextPodFunc(podQueue), + NextPod: internalqueue.MakeNextPodFunc(logger, podQueue), StopEverything: stopEverything, SchedulingQueue: podQueue, Profiles: profiles, + logger: logger, } sched.applyDefaultHandlers() @@ -353,7 +356,8 @@ func New(client clientset.Interface, // Run begins watching and scheduling. 
It starts scheduling and blocked until the context is done. func (sched *Scheduler) Run(ctx context.Context) { - sched.SchedulingQueue.Run() + logger := klog.FromContext(ctx) + sched.SchedulingQueue.Run(logger) // We need to start scheduleOne loop in a dedicated goroutine, // because scheduleOne function hangs on getting the next item @@ -375,7 +379,7 @@ func NewInformerFactory(cs clientset.Interface, resyncPeriod time.Duration) info return informerFactory } -func buildExtenders(extenders []schedulerapi.Extender, profiles []schedulerapi.KubeSchedulerProfile) ([]framework.Extender, error) { +func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profiles []schedulerapi.KubeSchedulerProfile) ([]framework.Extender, error) { var fExtenders []framework.Extender if len(extenders) == 0 { return nil, nil @@ -384,7 +388,7 @@ func buildExtenders(extenders []schedulerapi.Extender, profiles []schedulerapi.K var ignoredExtendedResources []string var ignorableExtenders []framework.Extender for i := range extenders { - klog.V(2).InfoS("Creating extender", "extender", extenders[i]) + logger.V(2).Info("Creating extender", "extender", extenders[i]) extender, err := NewHTTPExtender(&extenders[i]) if err != nil { return nil, err @@ -435,7 +439,7 @@ func buildExtenders(extenders []schedulerapi.Extender, profiles []schedulerapi.K type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) -func unionedGVKs(m map[framework.ClusterEvent]sets.String) map[framework.GVK]framework.ActionType { +func unionedGVKs(m map[framework.ClusterEvent]sets.Set[string]) map[framework.GVK]framework.ActionType { gvkMap := make(map[framework.GVK]framework.ActionType) for evt := range m { if _, ok := gvkMap[evt.Resource]; ok { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go index 3b083878b410..a477742032d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go @@ -67,12 +67,12 @@ func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time { maxPriority := corev1helpers.PodPriority(victims.Pods[0]) for _, pod := range victims.Pods { - if corev1helpers.PodPriority(pod) == maxPriority { - if GetPodStartTime(pod).Before(earliestPodStartTime) { - earliestPodStartTime = GetPodStartTime(pod) + if podPriority := corev1helpers.PodPriority(pod); podPriority == maxPriority { + if podStartTime := GetPodStartTime(pod); podStartTime.Before(earliestPodStartTime) { + earliestPodStartTime = podStartTime } - } else if corev1helpers.PodPriority(pod) > maxPriority { - maxPriority = corev1helpers.PodPriority(pod) + } else if podPriority > maxPriority { + maxPriority = podPriority earliestPodStartTime = GetPodStartTime(pod) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/OWNERS deleted file mode 100644 index 675d11afb440..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/conntrack/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-network-approvers -reviewers: - - sig-network-reviewers -labels: - - sig/network diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go index a7eb01ffe361..0ddd2248fa9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go @@ -85,7 +85,7 @@ func (fs *DefaultFs) RemoveAll(path string) error { return os.RemoveAll(fs.prefix(path)) } -// Remove via os.RemoveAll +// Remove via os.Remove func (fs *DefaultFs) Remove(name string) error { return os.Remove(fs.prefix(name)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go index 43cd4aa7e291..6408e0fa838c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go @@ -33,7 +33,7 @@ type Filesystem interface { RemoveAll(path string) error Remove(name string) error - // from "io/ioutil" + // from "os" ReadFile(filename string) ([]byte, error) TempDir(dir, prefix string) (string, error) TempFile(dir, prefix string) (File, error) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go index 803f066a440b..0962e5cfb5b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go @@ -17,9 +17,10 @@ limitations under the License. package hash import ( + "fmt" "hash" - "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/dump" ) // DeepHashObject writes specified object to hash using the spew library @@ -27,11 +28,5 @@ import ( // ensuring the hash does not change when a pointer changes. 
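The filesystem.go comment fix reflects that these interface helpers map to the os package now that io/ioutil is deprecated (since Go 1.16). For reference, a sketch of the one-to-one replacements:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "demo-") // was ioutil.TempDir
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	f, err := os.CreateTemp(dir, "cfg-") // was ioutil.TempFile
	if err != nil {
		panic(err)
	}
	if _, err := f.WriteString("hello"); err != nil {
		panic(err)
	}
	f.Close()

	data, err := os.ReadFile(f.Name()) // was ioutil.ReadFile
	if err != nil {
		panic(err)
	}
	fmt.Println(filepath.Base(f.Name()), string(data))
}
```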
func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { hasher.Reset() - printer := spew.ConfigState{ - Indent: " ", - SortKeys: true, - DisableMethods: true, - SpewKeys: true, - } - printer.Fprintf(hasher, "%#v", objectToWrite) + fmt.Fprintf(hasher, "%v", dump.ForHash(objectToWrite)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS index 80a5e13e3085..548730fa9528 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/OWNERS @@ -1,14 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - dcbw - - thockin - - danwinship + - sig-network-reviewers approvers: - - dcbw - - thockin - - danwinship -emeritus_approvers: - - eparis + - sig-network-approvers labels: - sig/network diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/OWNERS deleted file mode 100644 index 44fd07aeee53..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/OWNERS +++ /dev/null @@ -1,16 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - andrewsykim - - uablrek -approvers: - - thockin - - andrewsykim - - uablrek -labels: - - sig/network - - area/ipvs -emeritus_approvers: - - lbernail - - m1093782566 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go index ef869cd768e0..75130a8628c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go @@ -31,7 +31,7 @@ import ( func ParseImageName(image string) (string, string, string, error) { named, err := dockerref.ParseNormalizedNamed(image) if err != nil { - return "", "", "", fmt.Errorf("couldn't parse image name: %v", err) + return "", "", "", fmt.Errorf("couldn't parse image name %q: %v", image, err) } repoToPull := named.Name() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go index 7a1e5e58178b..ae7151149785 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go @@ -252,7 +252,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. 
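DeepHashObject keeps its calling convention but now renders the object with dump.ForHash from apimachinery, which, like the spew configuration it replaces, follows pointers so equal values hash equally regardless of address. A usage sketch with an FNV hasher; the spec struct and the lowercase helper are made up for the example:

```go
package main

import (
	"fmt"
	"hash"
	"hash/fnv"

	"k8s.io/apimachinery/pkg/util/dump"
)

// deepHashObject mirrors the updated helper: reset the hasher, then write
// a deterministic, pointer-following dump of the object into it.
func deepHashObject(hasher hash.Hash, objectToWrite interface{}) {
	hasher.Reset()
	fmt.Fprintf(hasher, "%v", dump.ForHash(objectToWrite))
}

type spec struct {
	Image    string
	Replicas *int32
}

func main() {
	r := int32(3)
	h := fnv.New32a()
	deepHashObject(h, spec{Image: "nginx:1.25", Replicas: &r})
	fmt.Printf("%x\n", h.Sum32())

	// A second value with identical contents but a distinct pointer
	// hashes the same, because the dump dereferences pointers.
	r2 := int32(3)
	h2 := fnv.New32a()
	deepHashObject(h2, spec{Image: "nginx:1.25", Replicas: &r2})
	fmt.Printf("%x\n", h2.Sum32())
}
```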
- return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + return volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } err = writer.Write(payload, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go index 8ffb3acf49cf..ef3c98258ac8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go @@ -595,14 +595,13 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { driverName = data[volDataKey.driverName] volID = data[volDataKey.volHandle] } else { - klog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err)) - - // The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server - driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath) - if err != nil { - klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err)) - return err + if errors.Is(err, os.ErrNotExist) { + klog.V(4).Info(log("attacher.UnmountDevice skipped because volume data file [%s] does not exist", dataDir)) + return nil } + + klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err)) + return err } if c.csiClient == nil { @@ -682,36 +681,6 @@ func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) { return filepath.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), driver, volSha, globalMountInGlobalPath), nil } -func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMountPath string) (string, string, error) { - // deviceMountPath structure: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}/globalmount - dir := filepath.Dir(deviceMountPath) - if file := filepath.Base(deviceMountPath); file != globalMountInGlobalPath { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath failed, path did not end in %s", globalMountInGlobalPath)) - } - // dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname} - pvName := filepath.Base(dir) - - // Get PV and check for errors - pv, err := k8s.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) - if err != nil { - return "", "", err - } - if pv == nil || pv.Spec.CSI == nil { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath could not find CSI Persistent Volume Source for pv: %s", pvName)) - } - - // Get VolumeHandle and PluginName from pv - csiSource := pv.Spec.CSI - if csiSource.Driver == "" { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath failed, driver name empty")) - } - if csiSource.VolumeHandle == "" { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath failed, VolumeHandle empty")) - } - - return csiSource.Driver, csiSource.VolumeHandle, nil -} - func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { // when we received a deleted event during attachment, fail fast if attachment == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go index 1974b0367531..468f882b8845 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go @@ -333,7 +333,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error // Driver doesn't support applying FSGroup. Kubelet must apply it instead. // fullPluginName helps to distinguish different driver from csi plugin - err := volume.SetVolumeOwnership(c, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec)) + err := volume.SetVolumeOwnership(c, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec)) if err != nil { // At this point mount operation is successful: // 1. Since volume can not be used by the pod because of invalid permissions, we must return error diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go index ee2bdc193b32..bb4d799ff3c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go @@ -79,7 +79,7 @@ func loadVolumeData(dir string, fileName string) (map[string]string, error) { file, err := os.Open(dataFileName) if err != nil { - return nil, errors.New(log("failed to open volume data file [%s]: %v", dataFileName, err)) + return nil, fmt.Errorf("%s: %w", log("failed to open volume data file [%s]", dataFileName), err) } defer file.Close() data := map[string]string{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index b13e6ea6015c..54364009d018 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -223,7 +223,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, mounterArgs volume.Mounte setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. 
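Before the downwardapi hunk completes below, note how the two CSI hunks just shown fit together: UnmountDevice can only take its new skip-if-missing branch because loadVolumeData now wraps the underlying open error with %w instead of flattening it into a fresh string via errors.New. A minimal sketch of that distinction, with an illustrative path and message (errors.Is and os.ErrNotExist are the real standard-library names):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent/vol_data.json")

	// Old shape: errors.New over a formatted string discards the error chain.
	flattened := errors.New(fmt.Sprintf("failed to open volume data file: %v", err))
	// New shape: %w preserves the chain, so callers can match the cause.
	wrapped := fmt.Errorf("failed to open volume data file: %w", err)

	fmt.Println(errors.Is(flattened, os.ErrNotExist)) // false
	fmt.Println(errors.Is(wrapped, os.ErrNotExist))   // true
}
```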
- return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + return volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } err = writer.Write(data, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go index 9ad981c54bd4..192db424e676 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go @@ -280,7 +280,7 @@ func (ed *emptyDir) SetUpAt(dir string, mounterArgs volume.MounterArgs) error { err = fmt.Errorf("unknown storage medium %q", ed.medium) } - volume.SetVolumeOwnership(ed, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(ed.plugin, nil)) + volume.SetVolumeOwnership(ed, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(ed.plugin, nil)) // If setting up the quota fails, just log a message but don't actually error out. // We'll use the old du mechanism in this case, at least until we support @@ -519,10 +519,12 @@ func (ed *emptyDir) TearDownAt(dir string) error { } func (ed *emptyDir) teardownDefault(dir string) error { - // Remove any quota - err := fsquota.ClearQuota(ed.mounter, dir) - if err != nil { - klog.Warningf("Warning: Failed to clear quota on %s: %v", dir, err) + if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring) { + // Remove any quota + err := fsquota.ClearQuota(ed.mounter, dir) + if err != nil { + klog.Warningf("Warning: Failed to clear quota on %s: %v", dir, err) + } } // Renaming the directory is not required anymore because the operation executor // now handles duplicate operations on the same volume diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index bb054ea16618..02e15c4f85c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -91,7 +91,7 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou } if !b.readOnly { - volume.SetVolumeOwnership(&b, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go index 8098cfdb66ee..3821af7e9235 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go @@ -95,7 +95,7 @@ func (f *flexVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) if !f.readOnly { if f.plugin.capabilities.FSGroup { // fullPluginName helps to distinguish different driver from flex volume plugin - volume.SetVolumeOwnership(f, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(f.plugin, f.spec)) + volume.SetVolumeOwnership(f, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(f.plugin, f.spec)) } } 
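The configmap, downwardapi, emptydir, fc, and flexvolume hunks above, and the gce_pd, git_repo, iscsi, local, portworx, projected, rbd, secret, csi, and vsphere ones below, all make the same mechanical change: SetVolumeOwnership no longer derives its target from mounter.GetPath() internally, and every caller passes the directory it just set up. A compilable sketch of the shape of that change, with the real volume.Mounter interface reduced to the one method that matters here (all names in the sketch are illustrative):

```go
package main

import "fmt"

// mounter stands in for volume.Mounter; only GetPath is relevant here.
type mounter struct{ path string }

func (m mounter) GetPath() string { return m.path }

// Old shape: the helper resolved the directory itself.
func setVolumeOwnershipOld(m mounter, fsGroup *int64) {
	fmt.Println("recursive chown rooted at", m.GetPath())
}

// New shape: the caller supplies dir explicitly, so ownership is applied to
// the path that was actually just mounted in SetUpAt.
func setVolumeOwnershipNew(m mounter, dir string, fsGroup *int64) {
	fmt.Println("recursive chown rooted at", dir)
}

func main() {
	gid := int64(2000)
	m := mounter{path: "/var/lib/kubelet/pods/uid/volumes/example/vol"}
	setVolumeOwnershipOld(m, &gid)
	setVolumeOwnershipNew(m, m.GetPath(), &gid)
}
```

Threading dir through also lets skipPermissionChange and the slow-walk warning in volume_linux.go (further down in this diff) log and inspect the exact path the caller used, rather than whatever GetPath() resolves to at that moment.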
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go index 7bbeade0ef08..8dd63cf623cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go @@ -430,7 +430,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.Mounte klog.V(4).Infof("mount of disk %s succeeded", dir) if !b.readOnly { - if err := volume.SetVolumeOwnership(b, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)); err != nil { + if err := volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)); err != nil { klog.Errorf("SetVolumeOwnership returns error %v", err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go index fe890032e27d..995018d90072 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go @@ -235,7 +235,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArg return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err) } - volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) volumeutil.SetReady(b.getMetaDir()) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index 6d60e44efaf0..6aa8652bd6b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -96,7 +96,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter } if !b.readOnly { - volume.SetVolumeOwnership(&b, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index ca0bc3040021..0c8fe0753967 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -615,7 +615,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) if !m.readOnly { // Volume owner will be written only once on the first volume mount if len(refs) == 0 { - return volume.SetVolumeOwnership(m, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(m.plugin, nil)) + return volume.SetVolumeOwnership(m, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(m.plugin, nil)) } } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index e56d410a5055..2b19f97f134c 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -694,13 +694,11 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) { return match, nil } -// FindPluginByName fetches a plugin by name or by legacy name. If no plugin -// is found, returns error. +// FindPluginByName fetches a plugin by name. If no plugin is found, returns error. func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { pm.mutex.RLock() defer pm.mutex.RUnlock() - // Once we can get rid of legacy names we can reduce this to a map lookup. var match VolumePlugin if v, found := pm.plugins[name]; found { match = v @@ -1065,7 +1063,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { Name: "pv-recycler", Image: "registry.k8s.io/debian-base:v2.0.0", Command: []string{"/bin/sh"}, - Args: []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"}, + Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []v1.VolumeMount{ { Name: "vol", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go index e0eaf94495d3..6b9243f52341 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go @@ -335,7 +335,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterAr return err } if !b.readOnly { - volume.SetVolumeOwnership(b, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go index c82b38653e37..deb7728168a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go @@ -233,7 +233,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. - return volume.SetVolumeOwnership(s, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(s.plugin, nil)) + return volume.SetVolumeOwnership(s, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(s.plugin, nil)) } err = writer.Write(data, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go index edff33540f4d..2131c7ecedd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go @@ -96,7 +96,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. 
klog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions) if !b.ReadOnly { - volume.SetVolumeOwnership(&b, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go index f43f1bffa3b9..f1d2c9c59ffd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go @@ -247,7 +247,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. - return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + return volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } err = writer.Write(payload, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go index 240cc356ee88..33b74ab3b5fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go @@ -353,10 +353,11 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour } // When enforcing quotas are enabled, we'll condition this // on their being disabled also. 
- if ibytes > 0 { - ibytes = -1 + fsbytes := ibytes + if fsbytes > 0 { + fsbytes = -1 } - if err = setQuotaOnDir(path, id, ibytes); err == nil { + if err = setQuotaOnDir(path, id, fsbytes); err == nil { quotaPodMap[id] = internalPodUid quotaSizeMap[id] = ibytes podQuotaMap[internalPodUid] = id @@ -364,7 +365,7 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour dirPodMap[path] = internalPodUid podUidMap[internalPodUid] = externalPodUid podDirCountMap[internalPodUid]++ - klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path) + klog.V(4).Infof("Assigning quota ID %d (request limit %d, actual limit %d) to %s", id, ibytes, fsbytes, path) return nil } removeProjectID(path, id) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index b11183550b4a..f9b5e8160b9b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -28,7 +28,6 @@ import ( "github.com/go-logr/logr" "k8s.io/klog/v2" - "k8s.io/mount-utils" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -38,7 +37,6 @@ import ( "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) // OperationExecutor defines a set of operations for attaching, detaching, @@ -151,8 +149,6 @@ type OperationExecutor interface { ExpandInUseVolume(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, currentSize resource.Quantity) error // ReconstructVolumeOperation construct a new volumeSpec and returns it created by plugin ReconstructVolumeOperation(volumeMode v1.PersistentVolumeMode, plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, volumePath string, pluginName string) (volume.ReconstructedVolume, error) - // CheckVolumeExistenceOperation checks volume existence - CheckVolumeExistenceOperation(volumeSpec *volume.Spec, mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error) } // NewOperationExecutor returns a new instance of OperationExecutor. @@ -1092,61 +1088,3 @@ func (oe *operationExecutor) ReconstructVolumeOperation( Spec: volumeSpec, }, nil } - -// CheckVolumeExistenceOperation checks mount path directory if volume still exists -func (oe *operationExecutor) CheckVolumeExistenceOperation( - volumeSpec *volume.Spec, - mountPath, volumeName string, - mounter mount.Interface, - uniqueVolumeName v1.UniqueVolumeName, - podName volumetypes.UniquePodName, - podUID types.UID, - attachable volume.AttachableVolumePlugin) (bool, error) { - fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec) - if err != nil { - return false, err - } - - // Filesystem Volume case - // For attachable volume case, check mount path directory if volume is still existing and mounted. - // Return true if volume is mounted. 
- if fsVolume { - if attachable != nil { - var isNotMount bool - var mountCheckErr error - if mounter == nil { - return false, fmt.Errorf("mounter was not set for a filesystem volume") - } - if isNotMount, mountCheckErr = mount.IsNotMountPoint(mounter, mountPath); mountCheckErr != nil { - return false, fmt.Errorf("could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v", - uniqueVolumeName, - volumeName, - podName, - podUID, - mountCheckErr) - } - return !isNotMount, nil - } - // For non-attachable volume case, skip check and return true without mount point check - // since plugins may not have volume mount point. - return true, nil - } - - // Block Volume case - // Check mount path directory if volume still exists, then return true if volume - // is there. Either plugin is attachable or non-attachable, the plugin should - // have symbolic link associated to raw block device under pod device map - // if volume exists. - blkutil := volumepathhandler.NewBlockVolumePathHandler() - var islinkExist bool - var checkErr error - if islinkExist, checkErr = blkutil.IsSymlinkExist(mountPath); checkErr != nil { - return false, fmt.Errorf("could not check whether the block volume %q (spec.Name: %q) pod %q (UID: %q) is mapped to: %v", - uniqueVolumeName, - volumeName, - podName, - podUID, - checkErr) - } - return islinkExist, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index bc33f5f2d424..d8db43a19c7e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -688,9 +688,9 @@ func HasMountRefs(mountPath string, mountRefs []string) bool { // Neither of the above should be counted as a mount ref as those are handled // by the kubelet. What we're concerned about is a path like // /data/local/some/manual/mount - // As unmonting could interrupt usage from that mountpoint. + // As unmounting could interrupt usage from that mountpoint. // - // So instead of looking for the entire /var/lib/... path, the plugins/kuberentes.io/ + // So instead of looking for the entire /var/lib/... path, the plugins/kubernetes.io/ // suffix is trimmed off and searched for. // // If there isn't a /plugins/... path, the whole mountPath is used instead. @@ -706,7 +706,7 @@ func HasMountRefs(mountPath string, mountRefs []string) bool { return false } -// WriteVolumeCache flush disk data given the spcified mount path +// WriteVolumeCache flush disk data given the specified mount path func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error { // If runtime os is windows, execute Write-VolumeCache powershell command on the disk if runtime.GOOS == "windows" { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index 57c02815029a..ec7f6da4bfe9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -40,22 +40,22 @@ const ( // SetVolumeOwnership modifies the given volume to be owned by // fsGroup, and sets SetGid so that newly created files are owned by // fsGroup. If fsGroup is nil nothing is done. 
-func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { +func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { if fsGroup == nil { return nil } timer := time.AfterFunc(30*time.Second, func() { - klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath()) + klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", dir) }) defer timer.Stop() - if skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) { - klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", mounter.GetPath()) + if skipPermissionChange(mounter, dir, fsGroup, fsGroupChangePolicy) { + klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", dir) return nil } - err := walkDeep(mounter.GetPath(), func(path string, info os.FileInfo, err error) error { + err := walkDeep(dir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -104,14 +104,12 @@ func changeFilePermission(filename string, fsGroup *int64, readonly bool, info o return nil } -func skipPermissionChange(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool { - dir := mounter.GetPath() - +func skipPermissionChange(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool { if fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch { klog.V(4).InfoS("Perform recursive ownership change for directory", "path", dir) return false } - return !requiresPermissionChange(mounter.GetPath(), fsGroup, mounter.GetAttributes().ReadOnly) + return !requiresPermissionChange(dir, fsGroup, mounter.GetAttributes().ReadOnly) } func requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go index 20c56d4b63e2..3b5a200a6160 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go @@ -24,6 +24,6 @@ import ( "k8s.io/kubernetes/pkg/volume/util/types" ) -func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { +func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index 0660eed66bb3..9d5dd3a4a7c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -277,7 +277,7 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, mounterArgs 
volume.MounterArg os.Remove(dir) return err } - volume.SetVolumeOwnership(b, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) klog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go index b9f2b6c590b6..ae85c73f4315 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util_windows.go @@ -40,6 +40,10 @@ func verifyDevicePath(path string) (string, error) { klog.V(4).Infof("Found vSphere disk attached with disk number %v", path) return path, nil } + // NOTE: If a powershell command that returns an array (e.g. Get-Disk) returns an array of + // one element, powershell will in fact return that object directly, and **not** an array containing + // that element, which means piping it to ConvertTo-Json would not result in an array as expected below. + // The following syntax forces it to always be an array. cmd := exec.Command("powershell", "/c", "Get-Disk | Select Number, SerialNumber | ConvertTo-JSON") output, err := cmd.Output() if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go index 84687f36b83e..b7bb7b7fe595 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/deployment.go @@ -21,11 +21,10 @@ import ( "fmt" "time" - "github.com/davecgh/go-spew/spew" - apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/dump" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -37,7 +36,7 @@ type LogfFn func(format string, args ...interface{}) func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, logf LogfFn) { if newRS != nil { - logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS)) + logf("New ReplicaSet %q of Deployment %q:\n%s", newRS.Name, deployment.Name, dump.Pretty(*newRS)) } else { logf("New ReplicaSet of Deployment %q is nil.", deployment.Name) } @@ -45,7 +44,7 @@ func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.R logf("All old ReplicaSets of Deployment %q:", deployment.Name) } for i := range allOldRSs { - logf(spew.Sprintf("%+v", *allOldRSs[i])) + logf(dump.Pretty(*allOldRSs[i])) } } @@ -65,7 +64,7 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsL if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { availability = "available" } - logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod)) + logf("Pod %q is %s:\n%s", pod.Name, availability, dump.Pretty(pod)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/paths.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/paths.go index 9f1f6f5da34c..eaedb02aed25 100644 ---
a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/paths.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/paths.go @@ -27,6 +27,11 @@ import ( // GetK8sRootDir returns the root directory for kubernetes, if present in the gopath. func GetK8sRootDir() (string, error) { + dir := os.Getenv("KUBE_ROOT") + if len(dir) > 0 { + return dir, nil + } + dir, err := RootDir() if err != nil { return "", err @@ -59,12 +64,17 @@ func RootDir() (string, error) { } // GetK8sBuildOutputDir returns the build output directory for k8s -func GetK8sBuildOutputDir() (string, error) { +// For dockerized build, targetArch (eg: 'linux/arm64', 'linux/amd64') must be explicitly specified +// For non dockerized build, targetArch is ignored +func GetK8sBuildOutputDir(isDockerizedBuild bool, targetArch string) (string, error) { k8sRoot, err := GetK8sRootDir() if err != nil { return "", err } buildOutputDir := filepath.Join(k8sRoot, "_output/local/go/bin") + if isDockerizedBuild { + buildOutputDir = filepath.Join(k8sRoot, "_output/dockerized/bin/", targetArch) + } if _, err := os.Stat(buildOutputDir); err != nil { return "", err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pki_helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pki_helpers.go index 06c3290493df..c96e5855cac4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pki_helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/pki_helpers.go @@ -53,10 +53,12 @@ func EncodeCertPEM(cert *x509.Certificate) []byte { // NewSignedCert creates a signed certificate using the given CA certificate and key func NewSignedCert(cfg *certutil.Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) { - serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64)) + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) if err != nil { return nil, err } + serial = new(big.Int).Add(serial, big.NewInt(1)) if len(cfg.CommonName) == 0 { return nil, fmt.Errorf("must specify a CommonName") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go index 6e18e8b9cd25..3cbc8a7bba20 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -183,8 +183,11 @@ type RCConfig struct { ServiceAccountTokenProjections int - //Additional containers to run in the pod + // Additional containers to run in the pod AdditionalContainers []v1.Container + + // Security context for created pods + SecurityContext *v1.SecurityContext } func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) { @@ -335,11 +338,12 @@ func (config *DeploymentConfig) create() error { TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil), Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, - Ports: []v1.ContainerPort{{ContainerPort: 80}}, - Lifecycle: config.Lifecycle, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Ports: []v1.ContainerPort{{ContainerPort: 80}}, + Lifecycle: config.Lifecycle, + SecurityContext: config.SecurityContext, }, }, }, @@ -421,11 +425,12 @@ func (config *ReplicaSetConfig) create() error { TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil), Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, - Ports: []v1.ContainerPort{{ContainerPort: 80}}, - Lifecycle: config.Lifecycle, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Ports: []v1.ContainerPort{{ContainerPort: 80}}, + Lifecycle: config.Lifecycle, + SecurityContext: config.SecurityContext, }, }, }, @@ -499,10 +504,11 @@ func (config *JobConfig) create() error { TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil), Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, - Lifecycle: config.Lifecycle, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Lifecycle: config.Lifecycle, + SecurityContext: config.SecurityContext, }, }, RestartPolicy: v1.RestartPolicyOnFailure, @@ -612,12 +618,13 @@ func (config *RCConfig) create() error { Affinity: config.Affinity, Containers: []v1.Container{ { - Name: config.Name, - Image: config.Image, - Command: config.Command, - Ports: []v1.ContainerPort{{ContainerPort: 80}}, - ReadinessProbe: config.ReadinessProbe, - Lifecycle: config.Lifecycle, + Name: config.Name, + Image: config.Image, + Command: config.Command, + Ports: []v1.ContainerPort{{ContainerPort: 80}}, + ReadinessProbe: config.ReadinessProbe, + Lifecycle: config.Lifecycle, + SecurityContext: config.SecurityContext, }, }, DNSPolicy: *config.DNSPolicy, diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go index 0fe96a7b79d5..5bd3eea6e369 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go @@ -27,11 +27,14 @@ import ( "github.com/vmware/govmomi/object" 
"github.com/vmware/govmomi/vim25/mo" - "k8s.io/klog/v2" - v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8stypes "k8s.io/apimachinery/pkg/types" + coreclients "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/vsphere/vclib" ) @@ -61,6 +64,9 @@ type NodeManager struct { //CredentialsManager credentialManager *SecretCredentialManager + nodeLister corelisters.NodeLister + nodeGetter coreclients.NodesGetter + // Mutexes registeredNodesLock sync.RWMutex nodeInfoLock sync.RWMutex @@ -271,10 +277,43 @@ func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) { nm.registeredNodesLock.RLock() node := nm.registeredNodes[convertToString(nodeName)] nm.registeredNodesLock.RUnlock() - if node == nil { - return v1.Node{}, vclib.ErrNoVMFound + if node != nil { + klog.V(4).Infof("Node %s found in vSphere cloud provider cache", nodeName) + return *node, nil + } + + if nm.nodeLister != nil { + klog.V(4).Infof("Node %s missing in vSphere cloud provider cache, trying node informer") + node, err := nm.nodeLister.Get(convertToString(nodeName)) + if err != nil { + if !errors.IsNotFound(err) { + return v1.Node{}, err + } + // Fall through with IsNotFound error and try to get the node from the API server + } else { + node := node.DeepCopy() + nm.addNode(node) + klog.V(4).Infof("Node %s found in vSphere cloud provider node informer", nodeName) + return *node, nil + } } - return *node, nil + + if nm.nodeGetter != nil { + klog.V(4).Infof("Node %s missing in vSphere cloud provider caches, trying the API server") + node, err := nm.nodeGetter.Nodes().Get(context.TODO(), convertToString(nodeName), metav1.GetOptions{}) + if err != nil { + if !errors.IsNotFound(err) { + return v1.Node{}, err + } + // Fall through with IsNotFound error to keep the code consistent with the above + } else { + nm.addNode(node) + klog.V(4).Infof("Node %s found in the API server", nodeName) + return *node, nil + } + } + klog.V(4).Infof("Node %s not found in vSphere cloud provider", nodeName) + return v1.Node{}, vclib.ErrNoVMFound } func (nm *NodeManager) getNodes() map[string]*v1.Node { @@ -515,3 +554,11 @@ func (nm *NodeManager) GetHostsInZone(ctx context.Context, zoneFailureDomain str klog.V(4).Infof("GetHostsInZone %v returning: %v", zoneFailureDomain, hosts) return hosts, nil } + +func (nm *NodeManager) SetNodeLister(nodeLister corelisters.NodeLister) { + nm.nodeLister = nodeLister +} + +func (nm *NodeManager) SetNodeGetter(nodeGetter coreclients.NodesGetter) { + nm.nodeGetter = nodeGetter +} diff --git a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go index 756c3f6c9993..615094db6611 100644 --- a/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go +++ b/cluster-autoscaler/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go @@ -276,6 +276,7 @@ func init() { // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (vs *VSphere) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { vs.kubeClient = clientBuilder.ClientOrDie("vsphere-legacy-cloud-provider") + vs.nodeManager.SetNodeGetter(vs.kubeClient.CoreV1()) } // Initialize Node Informers @@ -318,6 +319,9 @@ func (vs *VSphere) SetInformers(informerFactory 
informers.SharedInformerFactory) cache.ResourceEventHandlerFuncs{UpdateFunc: vs.syncNodeZoneLabels}, zoneLabelsResyncPeriod, ) + + nodeLister := informerFactory.Core().V1().Nodes().Lister() + vs.nodeManager.SetNodeLister(nodeLister) klog.V(4).Infof("Node informers in vSphere cloud provider initialized") } diff --git a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go index 7b2a2d1a15e0..4f2b7aee8739 100644 --- a/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/mount-utils/mount_windows.go @@ -299,26 +299,20 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target } klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) - volumeIds, err := listVolumesOnDisk(source) + volumeIds, err := ListVolumesOnDisk(source) if err != nil { return err } driverPath := volumeIds[0] - target = NormalizeWindowsPath(target) - output, err := mounter.Exec.Command("cmd", "/c", "mklink", "/D", target, driverPath).CombinedOutput() - if err != nil { - klog.Errorf("mklink(%s, %s) failed: %v, output: %q", target, driverPath, err, string(output)) - return err - } - klog.V(2).Infof("formatAndMount disk(%s) fstype(%s) on(%s) with output(%s) successfully", driverPath, fstype, target, string(output)) - return nil + return mounter.MountSensitive(driverPath, target, fstype, options, sensitiveOptions) } // ListVolumesOnDisk - returns back list of volumes(volumeIDs) in the disk (requested in diskID). -func listVolumesOnDisk(diskID string) (volumeIDs []string, err error) { - cmd := fmt.Sprintf("(Get-Disk -DeviceId %s | Get-Partition | Get-Volume).UniqueId", diskID) +func ListVolumesOnDisk(diskID string) (volumeIDs []string, err error) { + // If a Disk has multiple volumes, Get-Volume may not return items in the same order. + cmd := fmt.Sprintf("(Get-Disk -DeviceId %s | Get-Partition | Get-Volume | Sort-Object -Property UniqueId).UniqueId", diskID) output, err := exec.Command("powershell", "/c", cmd).CombinedOutput() - klog.V(4).Infof("listVolumesOnDisk id from %s: %s", diskID, string(output)) + klog.V(4).Infof("ListVolumesOnDisk id from %s: %s", diskID, string(output)) if err != nil { return []string{}, fmt.Errorf("error list volumes on disk. 
cmd: %s, output: %s, error: %v", cmd, string(output), err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/OWNERS b/cluster-autoscaler/vendor/k8s.io/utils/cpuset/OWNERS similarity index 57% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/OWNERS rename to cluster-autoscaler/vendor/k8s.io/utils/cpuset/OWNERS index d4c6257fd184..0ec2b0852722 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/utils/cpuset/OWNERS @@ -1,7 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - dchen1107 - derekwaynecarr -emeritus_approvers: - - ConnorDoyle - - vishh + - ffromani + - klueska + - SergeyKanzhelev diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go b/cluster-autoscaler/vendor/k8s.io/utils/cpuset/cpuset.go similarity index 98% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go rename to cluster-autoscaler/vendor/k8s.io/utils/cpuset/cpuset.go index f20a3fa029c4..52912d95bcdc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go +++ b/cluster-autoscaler/vendor/k8s.io/utils/cpuset/cpuset.go @@ -23,6 +23,8 @@ limitations under the License. // See http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS for details. // // Future work can migrate this to use a 'set' library, and relax the dubious 'immutable' property. +// +// This package was originally developed in the 'kubernetes' repository. package cpuset import ( diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 3b28e00a24fb..bdcdc93213e8 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -4,7 +4,7 @@ cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# github.com/Azure/azure-sdk-for-go v67.2.0+incompatible +# github.com/Azure/azure-sdk-for-go v68.0.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute @@ -24,11 +24,11 @@ github.com/Azure/azure-sdk-for-go/version # github.com/Azure/go-autorest v14.2.0+incompatible ## explicit github.com/Azure/go-autorest -# github.com/Azure/go-autorest/autorest v0.11.28 +# github.com/Azure/go-autorest/autorest v0.11.29 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.9.21 +# github.com/Azure/go-autorest/autorest/adal v0.9.23 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest/adal # github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 @@ -67,9 +67,10 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock # github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab ## explicit github.com/JeffAshton/win_pdh -# github.com/Microsoft/go-winio v0.4.17 -## explicit; go 1.12 +# github.com/Microsoft/go-winio v0.6.0 +## explicit; go 1.17 github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/security github.com/Microsoft/go-winio/vhd @@ -77,8 
+78,6 @@ github.com/Microsoft/go-winio/vhd ## explicit; go 1.13 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage -github.com/Microsoft/hcsshim/hcn -github.com/Microsoft/hcsshim/internal/cni github.com/Microsoft/hcsshim/internal/cow github.com/Microsoft/hcsshim/internal/hcs github.com/Microsoft/hcsshim/internal/hcs/schema1 @@ -91,8 +90,6 @@ github.com/Microsoft/hcsshim/internal/logfields github.com/Microsoft/hcsshim/internal/longpath github.com/Microsoft/hcsshim/internal/mergemaps github.com/Microsoft/hcsshim/internal/oc -github.com/Microsoft/hcsshim/internal/regstate -github.com/Microsoft/hcsshim/internal/runhcs github.com/Microsoft/hcsshim/internal/safefile github.com/Microsoft/hcsshim/internal/timeout github.com/Microsoft/hcsshim/internal/vmcompute @@ -166,7 +163,7 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/cenkalti/backoff/v4 v4.2.0 +# github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 @@ -176,24 +173,25 @@ github.com/cespare/xxhash/v2 ## explicit; go 1.13 github.com/checkpoint-restore/go-criu/v5 github.com/checkpoint-restore/go-criu/v5/rpc -# github.com/cilium/ebpf v0.7.0 -## explicit; go 1.16 +# github.com/cilium/ebpf v0.9.1 +## explicit; go 1.17 github.com/cilium/ebpf github.com/cilium/ebpf/asm +github.com/cilium/ebpf/btf github.com/cilium/ebpf/internal -github.com/cilium/ebpf/internal/btf +github.com/cilium/ebpf/internal/sys github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link -# github.com/container-storage-interface/spec v1.7.0 +# github.com/container-storage-interface/spec v1.8.0 ## explicit; go 1.18 github.com/container-storage-interface/spec/lib/go/csi -# github.com/containerd/cgroups v1.0.1 -## explicit; go 1.13 +# github.com/containerd/cgroups v1.1.0 +## explicit; go 1.17 github.com/containerd/cgroups/stats/v1 # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/ttrpc v1.1.0 +# github.com/containerd/ttrpc v1.2.2 ## explicit; go 1.13 github.com/containerd/ttrpc # github.com/coreos/go-semver v0.3.1 @@ -216,7 +214,7 @@ github.com/digitalocean/godo # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/docker/distribution v2.8.1+incompatible +# github.com/docker/distribution v2.8.2+incompatible ## explicit github.com/docker/distribution/digestset github.com/docker/distribution/reference @@ -239,9 +237,6 @@ github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/ghodss/yaml v1.0.0 -## explicit -github.com/ghodss/yaml # github.com/go-logr/logr v1.2.4 ## explicit; go 1.16 github.com/go-logr/logr @@ -262,7 +257,7 @@ github.com/go-openapi/swag # github.com/godbus/dbus/v5 v5.0.6 ## explicit; go 1.12 github.com/godbus/dbus/v5 -# github.com/gofrs/uuid v4.0.0+incompatible +# 
github.com/gofrs/uuid v4.4.0+incompatible ## explicit github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 @@ -272,7 +267,7 @@ github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/types -# github.com/golang-jwt/jwt/v4 v4.4.2 +# github.com/golang-jwt/jwt/v4 v4.5.0 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -341,7 +336,7 @@ github.com/google/cadvisor/utils/sysfs github.com/google/cadvisor/utils/sysinfo github.com/google/cadvisor/version github.com/google/cadvisor/watcher -# github.com/google/cel-go v0.14.0 +# github.com/google/cel-go v0.16.0 ## explicit; go 1.18 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -361,13 +356,13 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic v0.6.9 -## explicit; go 1.12 -github.com/google/gnostic/compiler -github.com/google/gnostic/extensions -github.com/google/gnostic/jsonschema -github.com/google/gnostic/openapiv2 -github.com/google/gnostic/openapiv3 +# github.com/google/gnostic-models v0.6.8 +## explicit; go 1.18 +github.com/google/gnostic-models/compiler +github.com/google/gnostic-models/extensions +github.com/google/gnostic-models/jsonschema +github.com/google/gnostic-models/openapiv2 +github.com/google/gnostic-models/openapiv3 # github.com/google/go-cmp v0.5.9 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -448,9 +443,6 @@ github.com/mistifyio/go-zfs # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/mapstructure v1.5.0 -## explicit; go 1.14 -github.com/mitchellh/mapstructure # github.com/moby/ipvs v1.1.0 ## explicit; go 1.17 github.com/moby/ipvs @@ -482,8 +474,8 @@ github.com/mxk/go-flowrate/flowrate # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/runc v1.1.4 -## explicit; go 1.16 +# github.com/opencontainers/runc v1.1.7 +## explicit; go 1.17 github.com/opencontainers/runc/libcontainer github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/capabilities @@ -551,7 +543,7 @@ github.com/rubiojr/go-vhd/vhd # github.com/satori/go.uuid v1.2.0 ## explicit github.com/satori/go.uuid -# github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 +# github.com/seccomp/libseccomp-golang v0.10.0 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang # github.com/sirupsen/logrus v1.9.0 @@ -582,8 +574,8 @@ github.com/syndtr/gocapability/capability ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl -# github.com/vishvananda/netns v0.0.2 -## explicit; go 1.12 +# github.com/vishvananda/netns v0.0.4 +## explicit; go 1.17 github.com/vishvananda/netns # github.com/vmware/govmomi v0.30.0 ## explicit; go 1.17 
@@ -618,24 +610,24 @@ github.com/vmware/govmomi/vim25/progress github.com/vmware/govmomi/vim25/soap github.com/vmware/govmomi/vim25/types github.com/vmware/govmomi/vim25/xml -# go.etcd.io/etcd/api/v3 v3.5.7 -## explicit; go 1.17 +# go.etcd.io/etcd/api/v3 v3.5.9 +## explicit; go 1.19 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.7 -## explicit; go 1.17 +# go.etcd.io/etcd/client/pkg/v3 v3.5.9 +## explicit; go 1.19 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.7 -## explicit; go 1.17 +# go.etcd.io/etcd/client/v3 v3.5.9 +## explicit; go 1.19 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint @@ -749,6 +741,9 @@ golang.org/x/crypto/salsa20/salsa ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices +# golang.org/x/mod v0.10.0 +## explicit; go 1.17 +golang.org/x/mod/semver # golang.org/x/net v0.9.0 ## explicit; go 1.17 golang.org/x/net/bpf @@ -777,9 +772,10 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/singleflight -# golang.org/x/sys v0.7.0 +# golang.org/x/sys v0.8.0 ## explicit; go 1.17 golang.org/x/sys/cpu +golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix @@ -817,6 +813,24 @@ golang.org/x/text/width # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate +# golang.org/x/tools v0.8.0 +## explicit; go 1.18 +golang.org/x/tools/cmd/stringer +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/internal/event +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gcimporter +golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/tokeninternal +golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/typesinternal # google.golang.org/api v0.114.0 ## explicit; go 1.19 google.golang.org/api/compute/v0.alpha @@ -847,16 +861,21 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 +# google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 +## explicit; go 1.19 +google.golang.org/genproto/internal +google.golang.org/genproto/protobuf/field_mask +# google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 +## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails 
google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.54.0 ## explicit; go 1.17 google.golang.org/grpc @@ -970,7 +989,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.27.0 => k8s.io/api v0.27.0 +# k8s.io/api v0.28.0-alpha.2 => k8s.io/api v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1026,7 +1045,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.27.0 => k8s.io/apimachinery v0.27.0 +# k8s.io/apimachinery v0.28.0-alpha.2 => k8s.io/apimachinery v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1058,6 +1077,7 @@ k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/dump k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/httpstream @@ -1087,7 +1107,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.27.0 => k8s.io/apiserver v0.27.0 +# k8s.io/apiserver v0.28.0-alpha.2 => k8s.io/apiserver v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1148,6 +1168,7 @@ k8s.io/apiserver/pkg/authorization/path k8s.io/apiserver/pkg/authorization/union k8s.io/apiserver/pkg/cel k8s.io/apiserver/pkg/cel/common +k8s.io/apiserver/pkg/cel/environment k8s.io/apiserver/pkg/cel/library k8s.io/apiserver/pkg/cel/openapi k8s.io/apiserver/pkg/cel/openapi/resolver @@ -1229,7 +1250,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.27.0 => k8s.io/client-go v0.27.0 +# k8s.io/client-go v0.28.0-alpha.2 => k8s.io/client-go v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1555,7 +1576,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.27.0 => k8s.io/cloud-provider v0.27.0 +# k8s.io/cloud-provider v0.28.0-alpha.2 => k8s.io/cloud-provider v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1577,7 +1598,7 @@ k8s.io/cloud-provider/volume/helpers # k8s.io/cloud-provider-aws v1.27.0 ## explicit; go 1.20 k8s.io/cloud-provider-aws/pkg/providers/v1 -# k8s.io/component-base v0.27.0 => k8s.io/component-base v0.27.0 +# k8s.io/component-base v0.28.0-alpha.2 => k8s.io/component-base v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/codec @@ -1605,7 +1626,7 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.27.0 => k8s.io/component-helpers v0.27.0 +# k8s.io/component-helpers v0.28.0-alpha.2 => k8s.io/component-helpers v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/node/topology @@ -1615,7 +1636,7 @@ 
k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/ephemeral k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.27.0 => k8s.io/controller-manager v0.27.0 +# k8s.io/controller-manager v0.28.0-alpha.2 => k8s.io/controller-manager v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1627,19 +1648,19 @@ k8s.io/controller-manager/pkg/features k8s.io/controller-manager/pkg/features/register k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.27.0 +# k8s.io/cri-api v0.28.0-alpha.2 => k8s.io/cri-api v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1 k8s.io/cri-api/pkg/errors -# k8s.io/csi-translation-lib v0.27.0 => k8s.io/csi-translation-lib v0.27.0 +# k8s.io/csi-translation-lib v0.27.0 => k8s.io/csi-translation-lib v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins -# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.27.0 +# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/dynamic-resource-allocation/resourceclaim -# k8s.io/klog/v2 v2.90.1 +# k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1647,13 +1668,13 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kms v0.27.0 => k8s.io/kms v0.27.0 +# k8s.io/kms v0.28.0-alpha.2 => k8s.io/kms v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c +# k8s.io/kube-openapi v0.0.0-20230601164746-7562a1006961 ## explicit; go 1.19 k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 @@ -1675,19 +1696,15 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.27.0 -## explicit; go 1.20 -k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.27.0 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/kube-scheduler/config/v1 -k8s.io/kube-scheduler/config/v1beta2 k8s.io/kube-scheduler/config/v1beta3 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.27.0 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.27.0 => k8s.io/kubelet v0.27.0 +# k8s.io/kubelet v0.28.0-alpha.2 => k8s.io/kubelet v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/kubelet/config/v1 k8s.io/kubelet/config/v1alpha1 @@ -1704,9 +1721,11 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.27.0 +k8s.io/kubelet/pkg/cri/streaming +k8s.io/kubelet/pkg/cri/streaming/portforward +k8s.io/kubelet/pkg/cri/streaming/remotecommand +# k8s.io/kubernetes v1.28.0-alpha.2 ## explicit; go 1.20 -k8s.io/kubernetes/cmd/kube-proxy/app k8s.io/kubernetes/cmd/kubelet/app k8s.io/kubernetes/cmd/kubelet/app/options 
k8s.io/kubernetes/pkg/api/legacyscheme @@ -1769,7 +1788,6 @@ k8s.io/kubernetes/pkg/kubelet/cm/containermap k8s.io/kubernetes/pkg/kubelet/cm/cpumanager k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology -k8s.io/kubernetes/pkg/kubelet/cm/cpuset k8s.io/kubernetes/pkg/kubelet/cm/devicemanager k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1 @@ -1786,9 +1804,6 @@ k8s.io/kubernetes/pkg/kubelet/configmap k8s.io/kubernetes/pkg/kubelet/container k8s.io/kubernetes/pkg/kubelet/container/testing k8s.io/kubernetes/pkg/kubelet/cri/remote -k8s.io/kubernetes/pkg/kubelet/cri/streaming -k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward -k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand k8s.io/kubernetes/pkg/kubelet/envvars k8s.io/kubernetes/pkg/kubelet/events k8s.io/kubernetes/pkg/kubelet/eviction @@ -1858,27 +1873,21 @@ k8s.io/kubernetes/pkg/probe/grpc k8s.io/kubernetes/pkg/probe/http k8s.io/kubernetes/pkg/probe/tcp k8s.io/kubernetes/pkg/proxy -k8s.io/kubernetes/pkg/proxy/apis -k8s.io/kubernetes/pkg/proxy/apis/config -k8s.io/kubernetes/pkg/proxy/apis/config/scheme -k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1 -k8s.io/kubernetes/pkg/proxy/apis/config/validation k8s.io/kubernetes/pkg/proxy/config +k8s.io/kubernetes/pkg/proxy/conntrack k8s.io/kubernetes/pkg/proxy/healthcheck -k8s.io/kubernetes/pkg/proxy/iptables k8s.io/kubernetes/pkg/proxy/ipvs +k8s.io/kubernetes/pkg/proxy/ipvs/util k8s.io/kubernetes/pkg/proxy/metaproxier k8s.io/kubernetes/pkg/proxy/metrics k8s.io/kubernetes/pkg/proxy/util k8s.io/kubernetes/pkg/proxy/util/iptables -k8s.io/kubernetes/pkg/proxy/winkernel k8s.io/kubernetes/pkg/registry/core/service/allocator k8s.io/kubernetes/pkg/scheduler k8s.io/kubernetes/pkg/scheduler/apis/config k8s.io/kubernetes/pkg/scheduler/apis/config/latest k8s.io/kubernetes/pkg/scheduler/apis/config/scheme k8s.io/kubernetes/pkg/scheduler/apis/config/v1 -k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2 k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3 k8s.io/kubernetes/pkg/scheduler/apis/config/validation k8s.io/kubernetes/pkg/scheduler/framework @@ -1920,7 +1929,6 @@ k8s.io/kubernetes/pkg/security/apparmor k8s.io/kubernetes/pkg/securitycontext k8s.io/kubernetes/pkg/util/async k8s.io/kubernetes/pkg/util/config -k8s.io/kubernetes/pkg/util/conntrack k8s.io/kubernetes/pkg/util/filesystem k8s.io/kubernetes/pkg/util/flag k8s.io/kubernetes/pkg/util/flock @@ -1929,7 +1937,6 @@ k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff k8s.io/kubernetes/pkg/util/hash k8s.io/kubernetes/pkg/util/ipset k8s.io/kubernetes/pkg/util/iptables -k8s.io/kubernetes/pkg/util/ipvs k8s.io/kubernetes/pkg/util/labels k8s.io/kubernetes/pkg/util/node k8s.io/kubernetes/pkg/util/oom @@ -1976,7 +1983,7 @@ k8s.io/kubernetes/pkg/volume/vsphere_volume k8s.io/kubernetes/pkg/windows/service k8s.io/kubernetes/test/utils k8s.io/kubernetes/third_party/forked/golang/expansion -# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.27.0 +# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth @@ -2018,7 +2025,7 @@ k8s.io/legacy-cloud-providers/gce/gcpcredential k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.27.0 +# 
k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.28.0-alpha.2 ## explicit; go 1.20 k8s.io/mount-utils # k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 @@ -2026,6 +2033,7 @@ k8s.io/mount-utils k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing +k8s.io/utils/cpuset k8s.io/utils/exec k8s.io/utils/inotify k8s.io/utils/integer @@ -2118,33 +2126,33 @@ sigs.k8s.io/yaml # github.com/aws/aws-sdk-go/service/eks => github.com/aws/aws-sdk-go/service/eks v1.38.49 # github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 # github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -# k8s.io/api => k8s.io/api v0.27.0 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.0 -# k8s.io/apimachinery => k8s.io/apimachinery v0.27.0 -# k8s.io/apiserver => k8s.io/apiserver v0.27.0 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.0 -# k8s.io/client-go => k8s.io/client-go v0.27.0 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.0 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.0 -# k8s.io/code-generator => k8s.io/code-generator v0.27.0 -# k8s.io/component-base => k8s.io/component-base v0.27.0 -# k8s.io/component-helpers => k8s.io/component-helpers v0.27.0 -# k8s.io/controller-manager => k8s.io/controller-manager v0.27.0 -# k8s.io/cri-api => k8s.io/cri-api v0.27.0 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.0 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.0 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.0 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.0 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.0 -# k8s.io/kubectl => k8s.io/kubectl v0.27.0 -# k8s.io/kubelet => k8s.io/kubelet v0.27.0 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.0 -# k8s.io/metrics => k8s.io/metrics v0.27.0 -# k8s.io/mount-utils => k8s.io/mount-utils v0.27.0 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.0 -# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.0 -# k8s.io/sample-controller => k8s.io/sample-controller v0.27.0 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.0 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.0 -# k8s.io/kms => k8s.io/kms v0.27.0 +# k8s.io/api => k8s.io/api v0.28.0-alpha.2 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.0-alpha.2 +# k8s.io/apimachinery => k8s.io/apimachinery v0.28.0-alpha.2 +# k8s.io/apiserver => k8s.io/apiserver v0.28.0-alpha.2 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.0-alpha.2 +# k8s.io/client-go => k8s.io/client-go v0.28.0-alpha.2 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.0-alpha.2 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.0-alpha.2 +# k8s.io/code-generator => k8s.io/code-generator v0.28.0-alpha.2 +# k8s.io/component-base => k8s.io/component-base v0.28.0-alpha.2 +# k8s.io/component-helpers => k8s.io/component-helpers v0.28.0-alpha.2 +# k8s.io/controller-manager => k8s.io/controller-manager v0.28.0-alpha.2 +# k8s.io/cri-api => k8s.io/cri-api v0.28.0-alpha.2 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.0-alpha.2 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.0-alpha.2 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.0-alpha.2 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.0-alpha.2 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.0-alpha.2 +# k8s.io/kubectl 
=> k8s.io/kubectl v0.28.0-alpha.2 +# k8s.io/kubelet => k8s.io/kubelet v0.28.0-alpha.2 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.0-alpha.2 +# k8s.io/metrics => k8s.io/metrics v0.28.0-alpha.2 +# k8s.io/mount-utils => k8s.io/mount-utils v0.28.0-alpha.2 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.0-alpha.2 +# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.0-alpha.2 +# k8s.io/sample-controller => k8s.io/sample-controller v0.28.0-alpha.2 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.0-alpha.2 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.0-alpha.2 +# k8s.io/kms => k8s.io/kms v0.28.0-alpha.2 # k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0 diff --git a/cluster-autoscaler/version/version.go b/cluster-autoscaler/version/version.go index 40f62afc4824..17931f6e0a99 100644 --- a/cluster-autoscaler/version/version.go +++ b/cluster-autoscaler/version/version.go @@ -17,4 +17,4 @@ limitations under the License. package version // ClusterAutoscalerVersion contains version of CA. -const ClusterAutoscalerVersion = "1.27.0-alpha.1" +const ClusterAutoscalerVersion = "1.28.0-alpha.2" diff --git a/vertical-pod-autoscaler/OWNERS b/vertical-pod-autoscaler/OWNERS index a18552707f43..5e0d01703867 100644 --- a/vertical-pod-autoscaler/OWNERS +++ b/vertical-pod-autoscaler/OWNERS @@ -6,6 +6,7 @@ reviewers: - kgolab - jbartosik - krzysied +- voelzmo emeritus_approvers: - schylek # 2022-09-30 labels: diff --git a/vertical-pod-autoscaler/README.md b/vertical-pod-autoscaler/README.md index f707a381872d..e861dc47d816 100644 --- a/vertical-pod-autoscaler/README.md +++ b/vertical-pod-autoscaler/README.md @@ -48,12 +48,13 @@ procedure described below. # Installation -The current default version is Vertical Pod Autoscaler 0.13.0 +The current default version is Vertical Pod Autoscaler 0.14.0 ### Compatibility | VPA version | Kubernetes version | |-----------------|--------------------| +| 0.14 | 1.25+ | | 0.13 | 1.25+ | | 0.12 | 1.25+ | | 0.11 | 1.22 - 1.24 | diff --git a/vertical-pod-autoscaler/RELEASE.md b/vertical-pod-autoscaler/RELEASE.md index 59933307f043..7ff2862c172e 100644 --- a/vertical-pod-autoscaler/RELEASE.md +++ b/vertical-pod-autoscaler/RELEASE.md @@ -25,6 +25,8 @@ We use the issue to communicate what is state of the release. 1. [ ] Wait for all VPA changes that will be in the release to merge. 2. [ ] Wait for [the end to end tests](https://k8s-testgrid.appspot.com/sig-autoscaling-vpa) to run with all VPA changes included. + To see what code was actually tested, look for `===== last commit =====` + entries in the full `build-log.txt` of a given test run. 3. [ ] Make sure the end to end VPA tests are green. ### New minor release @@ -45,8 +47,20 @@ We use the issue to communicate what is state of the release. ## Build and stage images +Create a fresh clone of the repo and switch to the `vpa-release-0.${minor}` +branch. This makes sure you have no local changes while building the images. + +For example: ```sh -for component in recommender updater admission-controller ; do REGISTRY=gcr.io/k8s-staging-autoscaling TAG=[*vpa-version*] make release --directory=pkg/${component}; done +git clone git@github.com:kubernetes/autoscaler.git +git switch vpa-release-0.14 +``` + +Once in the freshly cloned repo, build and stage the images. 
+ +```sh +cd vertical-pod-autoscaler/ +for component in recommender updater admission-controller ; do TAG=`grep 'const VerticalPodAutoscalerVersion = ' common/version.go | cut -d '"' -f 2` REGISTRY=gcr.io/k8s-staging-autoscaling make release --directory=pkg/${component}; done ``` ## Test the release @@ -54,45 +68,46 @@ for component in recommender updater admission-controller ; do REGISTRY=gcr.io/k 1. [ ] Create a Kubernetes cluster. If you're using GKE you can use the following command: ```shell - $ gcloud container clusters create e2e-test --machine-type=n1-standard-2 --image-type=COS_CONTAINERD --num-nodes=3 + gcloud container clusters create e2e-test --machine-type=n1-standard-2 --image-type=COS_CONTAINERD --num-nodes=3 ``` 1. [ ] Create clusterrole. If you're using GKE you can use the following command: ```shell - $ kubectl create clusterrolebinding my-cluster-admin-binding --clusterrole=cluster-admin --user=`gcloud config get-value account` + kubectl create clusterrolebinding my-cluster-admin-binding --clusterrole=cluster-admin --user=`gcloud config get-value account` ``` 1. [ ] Deploy VPA: ```shell - $ REGISTRY=gcr.io/k8s-staging-autoscaling TAG=[*vpa-version*] ./hack/vpa-up.sh + REGISTRY=gcr.io/k8s-staging-autoscaling TAG=`grep 'const VerticalPodAutoscalerVersion = ' common/version.go | cut -d '"' -f 2` ./hack/vpa-up.sh ``` 1. [ ] [Run](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/hack/run-e2e-tests.sh) the `full-vpa` test suite: ```shell - $ ./hack/run-e2e-tests.sh full-vpa + ./hack/run-e2e-tests.sh full-vpa ``` ## Promote image To promote image from staging repo send out PR updating -[autoscaling images mapping](https://github.com/kubernetes/k8s.io/blob/master/k8s.gcr.io/images/k8s-staging-autoscaling/images.yaml) +[autoscaling images mapping](https://github.com/kubernetes/k8s.io/blob/master/registry.k8s.io/images/k8s-staging-autoscaling/images.yaml) ([example](https://github.com/kubernetes/k8s.io/pull/1318)). +NOTE: Please use the [add-version.sh +script](https://github.com/kubernetes/k8s.io/blob/main/registry.k8s.io/images/k8s-staging-autoscaling/add-version.sh) +to prepare the changes automatically. + When PR merges the promoter will run automatically and upload the image from staging repo to final repo. The post submit job status can be tracked on [testgrid](https://testgrid.k8s.io/sig-k8s-infra-k8sio#post-k8sio-image-promo). To verify if the promoter finished its job one can use gcloud. E.g.: ```sh -$ gcloud container images describe us.gcr.io/k8s-artifacts-prod/autoscaling/vpa-recommender:[*vpa-version*] +gcloud container images describe registry.k8s.io/autoscaling/vpa-recommender:[*vpa-version*] ``` -You can also take a look at the images in the -[k8s-artifacts repo](https://us.gcr.io/k8s-artifacts-prod/autoscaling/). - ## Finalize release NOTE: We currently use two tags for releases: @@ -131,7 +146,7 @@ sure nothing we care about will break if we do. 1. [ ] To create and publish a github release from pushed tag go to https://github.com/kubernetes/autoscaler/releases/tag/vertical-pod-autoscaler-[*vpa-version*], - press `Edit release`, complete release title and release notes, tick the + press `Create release from tag`, complete release title and release notes, tick the `This is a pre-release` box and press `Publish release`. ## Permissions @@ -156,4 +171,4 @@ sure nothing we care about will break if we do. 
A member of the [autoscaler-admins](https://github.com/orgs/kubernetes/teams/autoscaler-admins) - can add you to add you as a collaborator. \ No newline at end of file + can add you as a collaborator. diff --git a/vertical-pod-autoscaler/builder/Dockerfile b/vertical-pod-autoscaler/builder/Dockerfile index 00ba8ebf9c2e..5985ce2b73cd 100644 --- a/vertical-pod-autoscaler/builder/Dockerfile +++ b/vertical-pod-autoscaler/builder/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang:1.20.4 +FROM golang:1.20.5 LABEL maintainer="Beata Skiba " ENV GOPATH /gopath/ diff --git a/vertical-pod-autoscaler/common/version.go b/vertical-pod-autoscaler/common/version.go index 18e056fb1dff..a42564707430 100644 --- a/vertical-pod-autoscaler/common/version.go +++ b/vertical-pod-autoscaler/common/version.go @@ -17,4 +17,4 @@ limitations under the License. package common // VerticalPodAutoscalerVersion is the version of VPA. -const VerticalPodAutoscalerVersion = "0.13.0" +const VerticalPodAutoscalerVersion = "0.14.0" diff --git a/vertical-pod-autoscaler/deploy/admission-controller-deployment.yaml b/vertical-pod-autoscaler/deploy/admission-controller-deployment.yaml index da2b4bcc587f..29b3dd278e85 100644 --- a/vertical-pod-autoscaler/deploy/admission-controller-deployment.yaml +++ b/vertical-pod-autoscaler/deploy/admission-controller-deployment.yaml @@ -20,7 +20,7 @@ spec: runAsUser: 65534 # nobody containers: - name: admission-controller - image: registry.k8s.io/autoscaling/vpa-admission-controller:0.13.0 + image: registry.k8s.io/autoscaling/vpa-admission-controller:0.14.0 imagePullPolicy: Always env: - name: NAMESPACE diff --git a/vertical-pod-autoscaler/deploy/recommender-deployment-high.yaml b/vertical-pod-autoscaler/deploy/recommender-deployment-high.yaml index d6e079e3f853..91f3b90c6281 100644 --- a/vertical-pod-autoscaler/deploy/recommender-deployment-high.yaml +++ b/vertical-pod-autoscaler/deploy/recommender-deployment-high.yaml @@ -26,7 +26,7 @@ spec: runAsUser: 65534 # nobody containers: - name: recommender - image: registry.k8s.io/autoscaling/vpa-recommender:0.13.0 + image: registry.k8s.io/autoscaling/vpa-recommender:0.14.0 imagePullPolicy: Always args: - --name=performance diff --git a/vertical-pod-autoscaler/deploy/recommender-deployment-low.yaml b/vertical-pod-autoscaler/deploy/recommender-deployment-low.yaml index 28d8e75e391e..03c7145dfb08 100644 --- a/vertical-pod-autoscaler/deploy/recommender-deployment-low.yaml +++ b/vertical-pod-autoscaler/deploy/recommender-deployment-low.yaml @@ -26,7 +26,7 @@ spec: runAsUser: 65534 # nobody containers: - name: recommender - image: registry.k8s.io/autoscaling/vpa-recommender:0.13.0 + image: registry.k8s.io/autoscaling/vpa-recommender:0.14.0 imagePullPolicy: Always args: - --name=frugal diff --git a/vertical-pod-autoscaler/deploy/recommender-deployment.yaml b/vertical-pod-autoscaler/deploy/recommender-deployment.yaml index 38a728c79773..7714c0626601 100644 --- a/vertical-pod-autoscaler/deploy/recommender-deployment.yaml +++ b/vertical-pod-autoscaler/deploy/recommender-deployment.yaml @@ -20,7 +20,7 @@ spec: runAsUser: 65534 # nobody containers: - name: recommender - image: registry.k8s.io/autoscaling/vpa-recommender:0.13.0 + image: registry.k8s.io/autoscaling/vpa-recommender:0.14.0 imagePullPolicy: Always resources: limits: diff --git a/vertical-pod-autoscaler/deploy/updater-deployment.yaml b/vertical-pod-autoscaler/deploy/updater-deployment.yaml
index 0d6287b706da..eda37a5a9ced 100644 --- a/vertical-pod-autoscaler/deploy/updater-deployment.yaml +++ b/vertical-pod-autoscaler/deploy/updater-deployment.yaml @@ -20,7 +20,7 @@ spec: runAsUser: 65534 # nobody containers: - name: updater - image: registry.k8s.io/autoscaling/vpa-updater:0.13.0 + image: registry.k8s.io/autoscaling/vpa-updater:0.14.0 imagePullPolicy: Always env: - name: NAMESPACE diff --git a/vertical-pod-autoscaler/enhancements/4016-in-place-updates-support/README.md b/vertical-pod-autoscaler/enhancements/4016-in-place-updates-support/README.md new file mode 100644 index 000000000000..1b73ce1034d6 --- /dev/null +++ b/vertical-pod-autoscaler/enhancements/4016-in-place-updates-support/README.md @@ -0,0 +1,190 @@ +# AEP-4016: Support for in place updates in VPA + + +- [Summary](#summary) + - [Goals](#goals) + - [Non-Goals](#non-goals) +- [Proposal](#proposal) +- [Context](#context) +- [Design Details](#design-details) + - [Applying Updates During Pod Admission](#applying-updates-during-pod-admission) + - [Applying Disruption-free Updates](#applying-disruption-free-updates) + - [Applying Disruptive Updates](#applying-disruptive-updates) + - [Comparison of `UpdateMode`s](#comparison-of-updatemodes) + - [Test Plan](#test-plan) + - [Implementation History](#implementation-history) + + +## Summary + + +VPA applies its recommendations with a mutating webhook, during pod creation. It can also evict +pods, expecting that it will apply the recommendation when the pod is recreated. This is a +disruptive process, so VPA has mechanisms to avoid overly frequent disruptive updates. + +This proposal allows VPA to apply its recommendations more frequently and with less disruption by +using the +[in-place update feature], which is an alpha feature [available in Kubernetes 1.27.] This proposal enables only core uses +of in place updates in VPA, with the intention of gathering more feedback. More advanced uses of in place updates in VPA +(like applying different recommendations during pod initialization) will be introduced as separate enhancement +proposals. + +[in-place update feature]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources +[available in Kubernetes 1.27.]: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#api-change-3 + +### Goals + +* Allow VPA to actuate without disruption, +* Allow VPA to actuate more frequently, when it can actuate without disruption, +* Allow VPA to actuate in situations where actuation by eviction doesn't work. + +### Non-Goals + +* Improved handling of injected sidecars + * A separate AEP will improve VPA's handling of injected sidecars. + +## Proposal + +Add new supported values of [`UpdateMode`]: + +* `InPlaceOnly` and +* `InPlaceOrRecreate`. + +[`UpdateMode`]: https://github.com/kubernetes/autoscaler/blob/71b489f5aec3899157b37472cdf36a1de223d011/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go#L124 + +## Context + +[In-place update of pod resources KEP] is available in alpha in [Kubernetes 1.27]. The feature allows changing container +resources while the container is running. It also adds the [`ResizePolicy`] field to Container. This field indicates, for an +individual resource, whether a container needs to be restarted by the kubelet when that resource is changed. For example, it may be +the case that a Container automatically adapts to a change in CPU, but needs to be restarted for a change in Memory to +take effect.
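To make the `ResizePolicy` semantics concrete, here is a minimal Go sketch, assuming the alpha `k8s.io/api/core/v1` types that ship with Kubernetes 1.27; the container name, image, and resource values are illustrative, not taken from this proposal. It declares a container that the kubelet may resize in place for CPU, but must restart when memory changes:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// CPU changes can be applied while the container keeps running;
	// memory changes require the kubelet to restart the container.
	c := corev1.Container{
		Name:  "app",
		Image: "registry.k8s.io/pause:3.9",
		ResizePolicy: []corev1.ContainerResizePolicy{
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
			{ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.RestartContainer},
		},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
	}
	fmt.Printf("resize policy for %s: %+v\n", c.Name, c.ResizePolicy)
}
```

In the terminology used below, a disruption-free update is one that only touches resources whose `RestartPolicy` in this sense is `NotRequired`.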
+ +[In-place update of pod resources KEP]: https://github.com/kubernetes/enhancements/issues/1287 +[Kubernetes 1.27]: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#api-change-3 +[`ResizePolicy`]: https://github.com/kubernetes/api/blob/8360d82aecbc72aa039281a394ebed2eaf0c0ccc/core/v1/types.go#L2448 + +## Design Details + +In the previously existing update modes (`Initial` and `Recreate`), only the admission controller changed the pod spec (the updater +was responsible for evicting pods so that the admission controller could change them during admission). + +In the newly added `InPlaceOnly` and `InPlaceOrRecreate` modes, VPA Updater will execute in-place updates (which change +the pod spec without involving the admission controller). + +### Applying Updates During Pod Admission + +For VPAs in `InPlaceOnly` and `InPlaceOrRecreate` modes, VPA Admission Controller will apply updates to starting pods, +like it does for VPAs in `Initial`, `Auto`, and `Recreate` modes. + +### Applying Disruption-free Updates + +When an update only changes resources for which a container indicates that it doesn't require a restart, then VPA can +attempt to actuate the change without disrupting the pod. VPA Updater will: +* attempt to actuate such updates in place, +* attempt to actuate them if the difference between recommendation and request is at least 10% + * even if the pod has been running for less than 12h, +* if necessary, execute only partial updates (if some changes would require a restart). + +### Applying Disruptive Updates + +In both `InPlaceOnly` and `InPlaceOrRecreate` modes, the VPA updater will attempt to apply updates that require a container +restart in place. It will update them under the conditions that would trigger an update in `Recreate` mode. That is, it will +apply disruptive updates if: + +* Any container has a request below the corresponding `LowerBound` or +* Any container has a request above the corresponding `UpperBound` or +* The difference between the sum of the pod's requests and the sum of the recommendation `Target`s is more than 10% and the pod has been + running undisrupted for at least 12h. + * A successful disruptive update counts as a disruption here (and prevents further disruptive updates to the pod for 12h). + +In `InPlaceOrRecreate` mode (but not in `InPlaceOnly` mode) the VPA updater will evict the pod to actuate a recommendation if it +attempted to apply the recommendation in place and failed. + +The VPA updater will consider that the update failed if: +* The pod has `.status.resize: Infeasible` or +* The pod has `.status.resize: Deferred` and more than 1 minute has elapsed since the update or +* The pod has `.status.resize: InProgress` and more than 1 hour has elapsed since the update: + * There seems to be a bug where containers that say they need to be restarted get stuck in update; hopefully it gets + fixed and we don't have to worry about this by beta. +* The patch attempt returns an error. + * If the attempt fails because it would change the pod's QoS: + * `InPlaceOrRecreate` will treat it as any other failure and consider evicting the pod. + * `InPlaceOnly` will consider applying a request slightly lower than the limit. + +Those failure modes shouldn't disrupt pod operations, only the update. If there are problems that can disrupt pod operation, +we should consider not implementing the `InPlaceOnly` mode.
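The failure conditions above map naturally onto the pod's `.status.resize` field. The following is a rough sketch rather than the actual updater implementation, assuming the Kubernetes 1.27 alpha `PodResizeStatus` constants in `k8s.io/api/core/v1`; the package, the helper name, and the caller-tracked attempt timestamp are hypothetical, and the patch-error case from the list above is left to the caller:

```go
package updater // hypothetical package, for illustration only

import (
	"time"

	corev1 "k8s.io/api/core/v1"
)

// inPlaceUpdateFailed reports whether an in-place resize attempted at
// attemptTime should be treated as failed, so that InPlaceOrRecreate
// can fall back to eviction.
func inPlaceUpdateFailed(pod *corev1.Pod, attemptTime time.Time) bool {
	elapsed := time.Since(attemptTime)
	switch pod.Status.Resize {
	case corev1.PodResizeStatusInfeasible:
		// The kubelet will never be able to apply this resize.
		return true
	case corev1.PodResizeStatusDeferred:
		// Deferred for over a minute: give up on in-place actuation.
		return elapsed > time.Minute
	case corev1.PodResizeStatusInProgress:
		// Stuck in progress for over an hour (e.g. the restart bug
		// mentioned above): treat as failed.
		return elapsed > time.Hour
	default:
		return false
	}
}
```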
+ +### Comparison of `UpdateMode`s + +The VPA updater considers the following conditions when deciding if it should apply an update: +- [`CanEvict`]: + - Pod is `Pending` or + - There are enough running pods in the controller. +- Quick OOM: + - Any container in the pod had a quick OOM ([by default less than 10 minutes] after the container started) and + - There is a difference between the sum of recommendations and the sum of current requests over containers (see + [`defaultPriorityProcessor.GetUpdatePriority`]). +- Long-lived pod - started enough time ago ([by default 12h]). +- Significant change - the difference between the sum of requests over containers and the sum of recommendations is large + enough ([by default 10%]). +- [Outside recommended range]: + - At least one container has at least one resource request lower than the lower bound of the corresponding + recommendation or + - At least one container has at least one resource request higher than the upper bound of the corresponding + recommendation. +- Disruption-free update - doesn't change any resources for which the relevant container specifies + `RestartPolicy: RestartContainer`. + +`Auto` / `Recreate` evicts a pod if: + * [`CanEvict`] returns true for the pod, and it meets at least one of the following conditions: + * Quick OOM, + * Outside recommended range, + * Long-lived pod with significant change. + +`InPlaceOnly` and `InPlaceOrRecreate` will attempt to apply a disruption-free update in place if the pod meets at least one +of the following conditions: +* Quick OOM, +* Outside recommended range, +* Significant change. + +`InPlaceOnly` and `InPlaceOrRecreate`, when considering a disruption-free update in place, ignore some conditions that +influence the eviction decision in `Recreate` mode: +* [`CanEvict`] won't be checked and +* Pods with significant change can be updated even if they are not long-lived. + +`InPlaceOnly` and `InPlaceOrRecreate` will attempt to apply updates that are **not** disruption-free in place under +the same conditions that apply to updates in `Recreate` mode. + +`InPlaceOrRecreate` will attempt to apply updates by eviction when: +* VPA already attempted to apply the update in place and failed and +* the pod meets the conditions for applying updates in `Recreate` mode. + +[`CanEvict`]: https://github.com/kubernetes/autoscaler/blob/114a35961a85efdf3f36859350764e5e2c0c7013/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction.go#LL100C10-L100C37 +[by default less than 10 minutes]: https://github.com/kubernetes/autoscaler/blob/114a35961a85efdf3f36859350764e5e2c0c7013/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go#L37 +[`UpdatePriorityCalculator.AddPod`]: https://github.com/kubernetes/autoscaler/blob/114a35961a85efdf3f36859350764e5e2c0c7013/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go#L81 +[by default 12h]: https://github.com/kubernetes/autoscaler/blob/114a35961a85efdf3f36859350764e5e2c0c7013/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go#L35 +[by default 10%]: https://github.com/kubernetes/autoscaler/blob/114a35961a85efdf3f36859350764e5e2c0c7013/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go#L33 +[Outside recommended range]: https://github.com/kubernetes/autoscaler/blob/114a35961a85efdf3f36859350764e5e2c0c7013/vertical-pod-autoscaler/pkg/updater/priority/priority_processor.go#L73 + +### Test Plan + +The following test scenarios will be added to e2e tests.
Both `InPlaceOnly` and `InPlaceOrRecreate` modes will be tested +and they should behave the same: + +* Admission controller applies a recommendation to a pod controlled by VPA. +* Disruption-free in-place update applied to all containers of a pod (request in recommendation bounds). +* Partial disruption-free update applied to some containers of a pod, some disruptive changes skipped (request in + recommendation bounds). +* Disruptive in-place update applied to all containers of a pod (request out of recommendation bounds). + +There will also be scenarios testing differences between `InPlaceOnly` and `InPlaceOrRecreate` modes: +* A disruptive in-place update fails. In `InPlaceOnly` the pod should not be evicted; in `InPlaceOrRecreate` the pod + should be evicted and the recommendation applied. +* VPA attempts an update that would change the Pod's QoS (`RequestsOnly` scaling, request initially < limit, recommendation + equal to limit). In `InPlaceOnly` the pod should not be evicted and a request slightly lower than the recommendation will be + applied. In `InPlaceOrRecreate` the pod should be evicted and the recommendation applied. + +## Implementation History + +- 2023-05-10: initial version \ No newline at end of file diff --git a/vertical-pod-autoscaler/hack/vpa-process-yaml.sh b/vertical-pod-autoscaler/hack/vpa-process-yaml.sh index f843d11a8e95..328f8aaf3f30 100755 --- a/vertical-pod-autoscaler/hack/vpa-process-yaml.sh +++ b/vertical-pod-autoscaler/hack/vpa-process-yaml.sh @@ -32,7 +32,7 @@ if [ $# -eq 0 ]; then fi DEFAULT_REGISTRY="registry.k8s.io/autoscaling" -DEFAULT_TAG="0.13.0" +DEFAULT_TAG="0.14.0" REGISTRY_TO_APPLY=${REGISTRY-$DEFAULT_REGISTRY} TAG_TO_APPLY=${TAG-$DEFAULT_TAG} diff --git a/vertical-pod-autoscaler/hack/vpa-process-yamls.sh b/vertical-pod-autoscaler/hack/vpa-process-yamls.sh index d30552efcbe7..aa7aafef04c4 100755 --- a/vertical-pod-autoscaler/hack/vpa-process-yamls.sh +++ b/vertical-pod-autoscaler/hack/vpa-process-yamls.sh @@ -42,7 +42,7 @@ fi ACTION=$1 COMPONENTS="vpa-v1-crd-gen vpa-rbac updater-deployment recommender-deployment admission-controller-deployment" case ${ACTION} in -delete|diff|print) COMPONENTS+=" vpa-beta2-crd" ;; +delete|diff) COMPONENTS+=" vpa-beta2-crd" ;; esac if [ $# -gt 1 ]; then diff --git a/vertical-pod-autoscaler/pkg/admission-controller/Makefile b/vertical-pod-autoscaler/pkg/admission-controller/Makefile index 2f49dab74e4a..b540b3fba2dc 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/Makefile +++ b/vertical-pod-autoscaler/pkg/admission-controller/Makefile @@ -42,7 +42,7 @@ ifndef TAG ERR = $(error TAG is undefined) $(ERR) endif - docker buildx build --pull --platform linux/$* -t ${REGISTRY}/${FULL_COMPONENT}-$*:${TAG} --build-arg ARCH=$* . + docker buildx build --pull --load --platform linux/$* -t ${REGISTRY}/${FULL_COMPONENT}-$*:${TAG} --build-arg ARCH=$* .
.PHONY: docker-push docker-push: $(addprefix sub-push-,$(ALL_ARCHITECTURES)) push-multi-arch; @@ -76,11 +76,23 @@ build-in-docker: $(addprefix build-in-docker-,$(ALL_ARCHITECTURES)) .PHONY: build-in-docker-* build-in-docker-%: clean docker-builder + echo '=============== local git status ===============' + git status + echo '=============== last commit ===============' + git log -1 + echo '=============== building from the above ===============' docker run -v `pwd`/../..:/gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler vpa-autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler && make build-binary-with-vendor-$* -C pkg/admission-controller' +.PHONY: create-buildx-builder +create-buildx-builder: + BUILDER=$(shell docker buildx create --driver=docker-container --use) + +.PHONY: remove-buildx-builder +remove-buildx-builder: + docker buildx rm ${BUILDER} .PHONY: release -release: build-in-docker docker-build docker-push +release: build-in-docker create-buildx-builder docker-build remove-buildx-builder docker-push @echo "Full in-docker release ${FULL_COMPONENT}:${TAG} completed" clean: $(addprefix clean-,$(ALL_ARCHITECTURES)) diff --git a/vertical-pod-autoscaler/pkg/recommender/Makefile b/vertical-pod-autoscaler/pkg/recommender/Makefile index 9ee5e87e373b..c51d0e944045 100644 --- a/vertical-pod-autoscaler/pkg/recommender/Makefile +++ b/vertical-pod-autoscaler/pkg/recommender/Makefile @@ -42,7 +42,7 @@ ifndef TAG ERR = $(error TAG is undefined) $(ERR) endif - docker buildx build --pull --platform linux/$* -t ${REGISTRY}/${FULL_COMPONENT}-$*:${TAG} --build-arg ARCH=$* . + docker buildx build --pull --load --platform linux/$* -t ${REGISTRY}/${FULL_COMPONENT}-$*:${TAG} --build-arg ARCH=$* . .PHONY: docker-push docker-push: $(addprefix sub-push-,$(ALL_ARCHITECTURES)) push-multi-arch; @@ -76,9 +76,22 @@ build-in-docker: $(addprefix build-in-docker-,$(ALL_ARCHITECTURES)) .PHONY: build-in-docker-* build-in-docker-%: clean docker-builder + echo '=============== local git status ===============' + git status + echo '=============== last commit ===============' + git log -1 + echo '=============== building from the above ===============' docker run -v `pwd`/../..:/gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler vpa-autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler && make build-binary-with-vendor-$* -C pkg/recommender' -release: build-in-docker docker-build docker-push +.PHONY: create-buildx-builder +create-buildx-builder: + BUILDER=$(shell docker buildx create --driver=docker-container --use) + +.PHONY: remove-buildx-builder +remove-buildx-builder: + docker buildx rm ${BUILDER} + +release: build-in-docker create-buildx-builder docker-build remove-buildx-builder docker-push @echo "Full in-docker release ${FULL_COMPONENT}:${TAG} completed" clean: $(addprefix clean-,$(ALL_ARCHITECTURES)) diff --git a/vertical-pod-autoscaler/pkg/updater/Makefile b/vertical-pod-autoscaler/pkg/updater/Makefile index bcc20d9e51c1..8f7a4a35d2bd 100644 --- a/vertical-pod-autoscaler/pkg/updater/Makefile +++ b/vertical-pod-autoscaler/pkg/updater/Makefile @@ -42,7 +42,7 @@ ifndef TAG ERR = $(error TAG is undefined) $(ERR) endif - docker buildx build --pull --platform linux/$* -t ${REGISTRY}/${FULL_COMPONENT}-$*:${TAG} --build-arg ARCH=$* . + docker buildx build --pull --load --platform linux/$* -t ${REGISTRY}/${FULL_COMPONENT}-$*:${TAG} --build-arg ARCH=$* .
.PHONY: docker-push docker-push: $(addprefix sub-push-,$(ALL_ARCHITECTURES)) push-multi-arch; @@ -76,10 +76,23 @@ build-in-docker: $(addprefix build-in-docker-,$(ALL_ARCHITECTURES)) .PHONY: build-in-docker-* build-in-docker-%: clean docker-builder + echo '=============== local git status ===============' + git status + echo '=============== last commit ===============' + git log -1 + echo '=============== building from the above ===============' docker run -v `pwd`/../..:/gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler vpa-autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler && make build-binary-with-vendor-$* -C pkg/updater' +.PHONY: create-buildx-builder +create-buildx-builder: + BUILDER=$(shell docker buildx create --driver=docker-container --use) + +.PHONY: remove-buildx-builder +remove-buildx-builder: + docker buildx rm ${BUILDER} + .PHONY: release -release: build-in-docker docker-build docker-push +release: build-in-docker create-buildx-builder docker-build remove-buildx-builder docker-push @echo "Full in-docker release ${FULL_COMPONENT}:${TAG} completed" clean: $(addprefix clean-,$(ALL_ARCHITECTURES))