diff --git a/go.mod b/go.mod index deefd952f..5760b1b9a 100644 --- a/go.mod +++ b/go.mod @@ -21,15 +21,15 @@ require ( golang.org/x/net v0.10.0 google.golang.org/grpc v1.51.0 google.golang.org/protobuf v1.30.0 - k8s.io/api v0.27.2 - k8s.io/apimachinery v0.27.2 - k8s.io/client-go v0.27.2 - k8s.io/component-base v0.27.2 + k8s.io/api v0.27.3 + k8s.io/apimachinery v0.27.3 + k8s.io/client-go v0.27.3 + k8s.io/component-base v0.27.3 k8s.io/klog/v2 v2.100.1 k8s.io/kubernetes v1.27.2 k8s.io/mount-utils v0.27.2 k8s.io/utils v0.0.0-20230209194617-a36077c30491 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230613061957-d0cd51201edc + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230630150243-d45a7951c84a sigs.k8s.io/yaml v1.3.0 ) @@ -39,10 +39,10 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 github.com/go-ini/ini v1.67.0 github.com/jongio/azidext/go/azidext v0.4.0 - github.com/onsi/ginkgo/v2 v2.10.0 + github.com/onsi/ginkgo/v2 v2.11.0 github.com/pkg/errors v0.9.1 github.com/satori/go.uuid v1.2.0 - k8s.io/apiserver v0.27.2 + k8s.io/apiserver v0.27.3 k8s.io/pod-security-admission v0.27.2 ) @@ -126,11 +126,11 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.9.0 // indirect + golang.org/x/crypto v0.10.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sync v0.2.0 // indirect + golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.9.0 // indirect - golang.org/x/term v0.8.0 // indirect + golang.org/x/term v0.9.0 // indirect golang.org/x/text v0.10.0 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect golang.org/x/tools v0.9.3 // indirect @@ -141,13 +141,13 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.0.0 // indirect - k8s.io/cloud-provider v0.27.2 // indirect - k8s.io/component-helpers v0.27.2 // indirect - k8s.io/controller-manager v0.27.2 // indirect - k8s.io/kms v0.27.2 // indirect + k8s.io/cloud-provider v0.27.3 // indirect + k8s.io/component-helpers v0.27.3 // indirect + k8s.io/controller-manager v0.27.3 // indirect + k8s.io/kms v0.27.3 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/kubectl v0.0.0 // indirect - k8s.io/kubelet v0.27.2 // indirect + k8s.io/kubelet v0.27.3 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index c91cb6852..e11d01023 100644 --- a/go.sum +++ b/go.sum @@ -330,8 +330,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs= -github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= 
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -472,8 +472,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -567,8 +567,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -622,8 +622,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -837,8 +837,8 @@ k8s.io/controller-manager v0.27.2/go.mod 
h1:2HzIhmjKxSH5dJVjYLuJ7/v9HYluNDcHLh6Z k8s.io/csi-translation-lib v0.27.2 h1:HbwiOk+M3jIkTC+e5nxUCwmux68OguKV/g9NaHDQhzs= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.27.2 h1:wCdmPCa3kubcVd3AssOeaVjLQSu45k5g/vruJ3iqwDU= -k8s.io/kms v0.27.2/go.mod h1:dahSqjI05J55Fo5qipzvHSRbm20d7llrSeQjjl86A7c= +k8s.io/kms v0.27.3 h1:O6mZqi647ZLmxxkEv5Q9jMlmcXOh42CBD+A3MxI6zaQ= +k8s.io/kms v0.27.3/go.mod h1:VDfnSIK0dk5J+jasbe+kKpb3CQVwlcDeBLyq59P2KyY= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= k8s.io/kubectl v0.27.2 h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg= @@ -858,8 +858,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230613061957-d0cd51201edc h1:VuFd2muy0t7XTFv+pYnb+Y27taqrkPr0dYshXHFeJr8= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230613061957-d0cd51201edc/go.mod h1:2fuRslJpbPeTFvLqpsEKyO7+oJqM4i8Bv3Z/LF8v4YI= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230630150243-d45a7951c84a h1:w46tbjUdarV//n510EavcAZAz1iUk+wjX0NzzVTsAU0= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230630150243-d45a7951c84a/go.mod h1:loPr/85Nm8kXVIh4OZgkJvmKMr+CzrQhbcAqOD3mxfk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index bf89cb2fc..fe5c2fcf4 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -202,9 +202,9 @@ type Driver struct { // a map storing all volumes created by this driver volMap sync.Map // a timed cache storing all volumeIDs and storage accounts that are using data plane API - dataPlaneAPIVolCache *azcache.TimedCache + dataPlaneAPIVolCache azcache.Resource // a timed cache storing account search history (solve account list throttling issue) - accountSearchCache *azcache.TimedCache + accountSearchCache azcache.Resource } // NewDriver Creates a NewCSIDriver object. 
Assumes vendor version is equal to driver version & @@ -236,10 +236,10 @@ func NewDriver(options *DriverOptions) *Driver { var err error getter := func(key string) (interface{}, error) { return nil, nil } - if d.accountSearchCache, err = azcache.NewTimedcache(time.Minute, getter); err != nil { + if d.accountSearchCache, err = azcache.NewTimedCache(time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } - if d.dataPlaneAPIVolCache, err = azcache.NewTimedcache(10*time.Minute, getter); err != nil { + if d.dataPlaneAPIVolCache, err = azcache.NewTimedCache(10*time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } return &d diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index f06d37740..cb72bd6f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,21 @@ +## 2.11.0 + +In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways: + +- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests +- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs. + +Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code. + +This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you please open an issue so we can explore solutions to the underlying problem you are trying to solve. + +### Fixes +- Programmatic focus is no longer overwrriten by CLI filters [d6bba86] + +### Maintenance +- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38] +- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d] + ## 2.10.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index 966ea0c1a..e3da7d14d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -8,22 +8,22 @@ import ( ) /* - If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to - unmark the container's focus. This gives developers a more intuitive experience when debugging specs. - It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - - this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: - - As a common example, consider: - - FDescribe("something to debug", function() { - It("works", function() {...}) - It("works", function() {...}) - FIt("doesn't work", function() {...}) - It("works", function() {...}) - }) - - here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. - The nested policy applied by this function enables this behavior. 
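
The blob.go hunk above moves the driver's two timed caches from the concrete *azcache.TimedCache type to the azcache.Resource interface and calls the renamed three-argument constructor. Below is a minimal sketch, not part of the patch, of building one of these caches with the new NewTimedCache(ttl, getter, disabled) signature; the no-op getter and the one-minute TTL simply mirror the values used in NewDriver.

```go
// Sketch only: mirrors the NewTimedCache(ttl, getter, disabled) call used in blob.go.
// The getter runs on cache misses; disabled=false keeps TTL-based caching enabled.
package main

import (
	"time"

	"k8s.io/klog/v2"
	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
)

func newAccountSearchCache() azcache.Resource {
	getter := func(key string) (interface{}, error) { return nil, nil } // illustrative no-op getter
	c, err := azcache.NewTimedCache(time.Minute, getter, false)
	if err != nil {
		klog.Fatalf("%v", err)
	}
	return c
}
```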
+If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to +unmark the container's focus. This gives developers a more intuitive experience when debugging specs. +It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - +this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: + +As a common example, consider: + + FDescribe("something to debug", function() { + It("works", function() {...}) + It("works", function() {...}) + FIt("doesn't work", function() {...}) + It("works", function() {...}) + }) + +here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. +The nested policy applied by this function enables this behavior. */ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { var walkTree func(tree *TreeNode) bool @@ -44,46 +44,43 @@ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { } /* - Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" - It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text - and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. +Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" +It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. - If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters. +When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters. - This function sets the `Skip` property on specs by applying Ginkgo's focus policy: - - If there are no CLI arguments and no programmatic focus, do nothing. - - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus. - - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. +This function sets the `Skip` property on specs by applying Ginkgo's focus policy: +- If there are no CLI arguments and no programmatic focus, do nothing. +- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus. +- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. - *Note:* specs with pending nodes are Skipped when created by NewSpec. +*Note:* specs with pending nodes are Skipped when created by NewSpec. 
*/ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") - hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" - type SkipCheck func(spec Spec) bool // by default, skip any specs marked pending skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} hasProgrammaticFocus := false - if !hasFocusCLIFlags { - // check for programmatic focus - for _, spec := range specs { - if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { - skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) - hasProgrammaticFocus = true - break - } + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + hasProgrammaticFocus = true + break } } + if hasProgrammaticFocus { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + } + if suiteConfig.LabelFilter != "" { labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) - skipChecks = append(skipChecks, func(spec Spec) bool { - return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) }) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index b7ed8ff79..f895739b8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.10.0" +const VERSION = "2.11.0" diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index e6a77f26a..dc6f301de 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -85,7 +85,7 @@ var supportedHostKeyAlgos = []string{ // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // because they have reached the end of their useful life. 
var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", + "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", } var supportedCompressions = []string{compressionNone} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go index c07a06285..0a21af47e 100644 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -10,6 +10,7 @@ import ( "crypto/hmac" "crypto/sha1" "crypto/sha256" + "crypto/sha512" "hash" ) @@ -46,6 +47,9 @@ func (t truncatingMAC) Size() int { func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } var macModes = map[string]*macMode{ + "hmac-sha2-512-etm@openssh.com": {64, true, func(key []byte) hash.Hash { + return hmac.New(sha512.New, key) + }}, "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) }}, diff --git a/vendor/modules.txt b/vendor/modules.txt index e5796bdff..3e9431a79 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -314,7 +314,7 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/ginkgo/v2 v2.10.0 +# github.com/onsi/ginkgo/v2 v2.11.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -506,7 +506,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.9.0 +# golang.org/x/crypto v0.10.0 ## explicit; go 1.17 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -543,8 +543,8 @@ golang.org/x/net/websocket ## explicit; go 1.11 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.3.0 +## explicit; go 1.17 golang.org/x/sync/singleflight # golang.org/x/sys v0.9.0 ## explicit; go 1.17 @@ -554,7 +554,7 @@ golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.8.0 +# golang.org/x/term v0.9.0 ## explicit; go 1.17 golang.org/x/term # golang.org/x/text v0.10.0 @@ -707,7 +707,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.27.2 => k8s.io/api v0.27.2 +# k8s.io/api v0.27.3 => k8s.io/api v0.27.2 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -767,7 +767,7 @@ k8s.io/api/storage/v1beta1 ## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.27.2 +# k8s.io/apimachinery v0.27.3 => k8s.io/apimachinery v0.27.2 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -827,7 +827,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.27.2 => k8s.io/apiserver v0.27.2 +# k8s.io/apiserver v0.27.3 => k8s.io/apiserver v0.27.2 ## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -969,7 +969,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# 
k8s.io/client-go v0.27.2 => k8s.io/client-go v0.27.2 +# k8s.io/client-go v0.27.3 => k8s.io/client-go v0.27.2 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1290,7 +1290,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.27.2 => k8s.io/cloud-provider v0.27.2 +# k8s.io/cloud-provider v0.27.3 => k8s.io/cloud-provider v0.27.2 ## explicit; go 1.20 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1308,7 +1308,7 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.27.2 => k8s.io/component-base v0.27.2 +# k8s.io/component-base v0.27.3 => k8s.io/component-base v0.27.2 ## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1330,14 +1330,14 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.27.2 => k8s.io/component-helpers v0.27.2 +# k8s.io/component-helpers v0.27.3 => k8s.io/component-helpers v0.27.2 ## explicit; go 1.20 k8s.io/component-helpers/node/util k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.27.2 => k8s.io/controller-manager v0.27.2 +# k8s.io/controller-manager v0.27.3 => k8s.io/controller-manager v0.27.2 ## explicit; go 1.20 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1357,7 +1357,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kms v0.27.2 +# k8s.io/kms v0.27.3 ## explicit; go 1.20 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1389,7 +1389,7 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson ## explicit; go 1.20 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.27.2 => k8s.io/kubelet v0.27.2 +# k8s.io/kubelet v0.27.3 => k8s.io/kubelet v0.27.2 ## explicit; go 1.20 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 @@ -1496,7 +1496,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230613061957-d0cd51201edc +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230630150243-d45a7951c84a ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go index 3e061ed32..c60eaa036 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/cache/azure_cache.go @@ -62,28 +62,56 @@ func cacheKeyFunc(obj interface{}) (string, error) { return obj.(*AzureCacheEntry).Key, nil } +// Resource operations +type Resource interface { + Get(key string, crt AzureCacheReadType) (interface{}, error) + GetWithDeepCopy(key string, crt AzureCacheReadType) (interface{}, 
error) + Delete(key string) error + Set(key string, data interface{}) + Update(key string, data interface{}) + + GetStore() cache.Store + Lock() + Unlock() +} + // TimedCache is a cache with TTL. type TimedCache struct { - Store cache.Store - Lock sync.Mutex + Store cache.Store + MutexLock sync.RWMutex + TTL time.Duration + + resourceProvider Resource +} + +type ResourceProvider struct { Getter GetFunc - TTL time.Duration } -// NewTimedcache creates a new TimedCache. -func NewTimedcache(ttl time.Duration, getter GetFunc) (*TimedCache, error) { +// NewTimedCache creates a new azcache.Resource. +func NewTimedCache(ttl time.Duration, getter GetFunc, disabled bool) (Resource, error) { if getter == nil { return nil, fmt.Errorf("getter is not provided") } - return &TimedCache{ + provider := &ResourceProvider{ Getter: getter, + } + + if disabled { + return provider, nil + } + + timedCache := &TimedCache{ // switch to using NewStore instead of NewTTLStore so that we can // reuse entries for calls that are fine with reading expired/stalled data. // with NewTTLStore, entries are not returned if they have already expired. - Store: cache.NewStore(cacheKeyFunc), - TTL: ttl, - }, nil + Store: cache.NewStore(cacheKeyFunc), + MutexLock: sync.RWMutex{}, + TTL: ttl, + resourceProvider: provider, + } + return timedCache, nil } // getInternal returns AzureCacheEntry by key. If the key is not cached yet, @@ -100,8 +128,8 @@ func (t *TimedCache) getInternal(key string) (*AzureCacheEntry, error) { // lock here to ensure if entry doesn't exist, we add a new entry // avoiding overwrites - t.Lock.Lock() - defer t.Lock.Unlock() + t.Lock() + defer t.Unlock() // Another goroutine might have written the same key. entry, exists, err = t.Store.GetByKey(key) @@ -127,6 +155,10 @@ func (t *TimedCache) Get(key string, crt AzureCacheReadType) (interface{}, error return t.get(key, crt) } +func (c *ResourceProvider) Get(key string, _ AzureCacheReadType) (interface{}, error) { + return c.Getter(key) +} + // Get returns the requested item by key with deep copy. func (t *TimedCache) GetWithDeepCopy(key string, crt AzureCacheReadType) (interface{}, error) { data, err := t.get(key, crt) @@ -134,6 +166,10 @@ func (t *TimedCache) GetWithDeepCopy(key string, crt AzureCacheReadType) (interf return copied, err } +func (c *ResourceProvider) GetWithDeepCopy(key string, _ AzureCacheReadType) (interface{}, error) { + return c.Getter(key) +} + func (t *TimedCache) get(key string, crt AzureCacheReadType) (interface{}, error) { entry, err := t.getInternal(key) if err != nil { @@ -157,7 +193,7 @@ func (t *TimedCache) get(key string, crt AzureCacheReadType) (interface{}, error // Data is not cached yet, cache data is expired or requested force refresh // cache it by getter. entry is locked before getting to ensure concurrent // gets don't result in multiple ARM calls. - data, err := t.Getter(key) + data, err := t.resourceProvider.Get(key, CacheReadTypeDefault /* not matter */) if err != nil { return nil, err } @@ -177,6 +213,10 @@ func (t *TimedCache) Delete(key string) error { }) } +func (c *ResourceProvider) Delete(_ string) error { + return nil +} + // Set sets the data cache for the key. // It is only used for testing. func (t *TimedCache) Set(key string, data interface{}) { @@ -187,6 +227,8 @@ func (t *TimedCache) Set(key string, data interface{}) { }) } +func (c *ResourceProvider) Set(_ string, _ interface{}) {} + // Update updates the data cache for the key. 
func (t *TimedCache) Update(key string, data interface{}) { if entry, err := t.getInternal(key); err == nil { @@ -202,3 +244,25 @@ func (t *TimedCache) Update(key string, data interface{}) { }) } } + +func (c *ResourceProvider) Update(_ string, _ interface{}) {} + +func (t *TimedCache) GetStore() cache.Store { + return t.Store +} + +func (c *ResourceProvider) GetStore() cache.Store { + return nil +} + +func (t *TimedCache) Lock() { + t.MutexLock.Lock() +} + +func (t *TimedCache) Unlock() { + t.MutexLock.Unlock() +} + +func (c *ResourceProvider) Lock() {} + +func (c *ResourceProvider) Unlock() {} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go index 286a950ff..0cbbbb819 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go @@ -194,6 +194,12 @@ const ( BackoffJitterDefault = 1.0 ) +// IP family variables +const ( + IPVersionIPv6 bool = true + IPVersionIPv4 bool = false +) + // LB variables for dual-stack var ( // Service.Spec.LoadBalancerIP has been deprecated and may be removed in a future release. Those two annotations are introduced as alternatives to set IPv4/IPv6 LoadBalancer IPs. @@ -241,7 +247,7 @@ const ( // ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify // which load balancer should be associated with the service. This is valid when using the basic - // load balancer or turn on the multiple standard load balancers mode, or it would be ignored. + // sku load balancer, or it would be ignored. // 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode") // In this case the Loadbalancer of the primary VMSS/VMAS is selected. // 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any VMSS/VMAS @@ -315,11 +321,15 @@ const ( // If omitted, the default value is false ServiceAnnotationDisableLoadBalancerFloatingIP = "service.beta.kubernetes.io/azure-disable-load-balancer-floating-ip" - // ServiceAnnotationAzurePIPTags sets the additional Public IPs (split by comma) besides the service's Public IP configured on LoadBalancer. + // ServiceAnnotationAdditionalPublicIPs sets the additional Public IPs (split by comma) besides the service's Public IP configured on LoadBalancer. // These additional Public IPs would be consumed by kube-proxy to configure the iptables rules on each node. Note they would not be configured // automatically on Azure LoadBalancer. Instead, they need to be configured manually (e.g. on Azure cross-region LoadBalancer by another operator). ServiceAnnotationAdditionalPublicIPs = "service.beta.kubernetes.io/azure-additional-public-ips" + // ServiceAnnotationLoadBalancerConfigurations is the list of load balancer configurations the service can use. + // The list is separated by comma. It will be omitted if multi-slb is not used. + ServiceAnnotationLoadBalancerConfigurations = "service.beta.kubernetes.io/azure-load-balancer-configurations" + // ServiceTagKey is the service key applied for public IP tags. 
ServiceTagKey = "k8s-azure-service" LegacyServiceTagKey = "service" diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go index ec42a5bd8..9706b55aa 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go @@ -23,7 +23,6 @@ import ( "strings" v1 "k8s.io/api/core/v1" - "k8s.io/utils/net" ) // IsK8sServiceHasHAModeEnabled return if HA Mode is enabled in kubernetes service annotations @@ -36,10 +35,6 @@ func IsK8sServiceUsingInternalLoadBalancer(service *v1.Service) bool { return expectAttributeInSvcAnnotationBeEqualTo(service.Annotations, ServiceAnnotationLoadBalancerInternal, TrueAnnotationValue) } -func IsK8sServiceInternalIPv6(service *v1.Service) bool { - return IsK8sServiceUsingInternalLoadBalancer(service) && net.IsIPv6String(service.Spec.ClusterIP) -} - // IsK8sServiceDisableLoadBalancerFloatingIP return if floating IP in load balancer is disabled in kubernetes service annotations func IsK8sServiceDisableLoadBalancerFloatingIP(service *v1.Service) bool { return expectAttributeInSvcAnnotationBeEqualTo(service.Annotations, ServiceAnnotationDisableLoadBalancerFloatingIP, TrueAnnotationValue) @@ -142,3 +137,17 @@ func expectAttributeInSvcAnnotationBeEqualTo(annotations map[string]string, key } return false } + +// getLoadBalancerConfigurationsNames parse the annotation and return the names of the load balancer configurations. +func GetLoadBalancerConfigurationsNames(service *v1.Service) []string { + var names []string + for key, lbConfig := range service.Annotations { + if strings.EqualFold(key, ServiceAnnotationLoadBalancerConfigurations) { + names = append(names, strings.Split(lbConfig, ",")...) + } + } + for i := range names { + names[i] = strings.ToLower(strings.TrimSpace(names[i])) + } + return names +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index a572ad06d..72e3d047c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -34,6 +34,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" @@ -255,6 +256,60 @@ type Config struct { // If the API is not used, the migration will be done by decoupling all nodes on the backend pool and then re-attaching // node IPs, which will introduce service downtime. The downtime increases with the number of nodes in the backend pool. EnableMigrateToIPBasedBackendPoolAPI bool `json:"enableMigrateToIPBasedBackendPoolAPI" yaml:"enableMigrateToIPBasedBackendPoolAPI"` + + // MultipleStandardLoadBalancerConfigurations stores the properties regarding multiple standard load balancers. + // It will be ignored if LoadBalancerBackendPoolConfigurationType is nodeIPConfiguration. + // If the length is not 0, it is assumed the multiple standard load balancers mode is on. In this case, + // there must be one configuration named “” or an error will be reported. 
+ MultipleStandardLoadBalancerConfigurations []MultipleStandardLoadBalancerConfiguration `json:"multipleStandardLoadBalancerConfigurations,omitempty" yaml:"multipleStandardLoadBalancerConfigurations,omitempty"` + + // DisableAPICallCache disables the cache for Azure API calls. It is for ARG support and not all resources will be disabled. + DisableAPICallCache bool `json:"disableAPICallCache,omitempty" yaml:"disableAPICallCache,omitempty"` +} + +// MultipleStandardLoadBalancerConfiguration stores the properties regarding multiple standard load balancers. +type MultipleStandardLoadBalancerConfiguration struct { + // Name of the public load balancer. There will be an internal load balancer + // created if needed, and the name will be `-internal`. The internal lb + // shares the same configurations as the external one. The internal lbs + // are not needed to be included in `MultipleStandardLoadBalancerConfigurations`. + // There must be a name of “” in the load balancer configuration list. + Name string `json:"name" yaml:"name"` + + MultipleStandardLoadBalancerConfigurationSpec + + MultipleStandardLoadBalancerConfigurationStatus +} + +// MultipleStandardLoadBalancerConfigurationSpec stores the properties regarding multiple standard load balancers. +type MultipleStandardLoadBalancerConfigurationSpec struct { + // This load balancer can have services placed on it. Defaults to true, + // can be set to false to drain and eventually remove a load balancer. + // This only affects services that will be using the LB. For services + // that is currently using the LB, they will not be affected. + AllowServicePlacement *bool `json:"allowServicePlacement" yaml:"allowServicePlacement"` + + // A string value that must specify the name of an existing vmSet. + // All nodes in the given vmSet will always be added to this load balancer. + // A vmSet can only be the primary vmSet for a single load balancer. + PrimaryVMSet string `json:"primaryVMSet" yaml:"primaryVMSet"` + + // Services that must match this selector can be placed on this load balancer. If not supplied, + // services with any labels can be created on the load balancer. + ServiceLabelSelector *metav1.LabelSelector `json:"serviceLabelSelector" yaml:"serviceLabelSelector"` + + // Services created in namespaces with the supplied label will be allowed to select that load balancer. + // If not supplied, services created in any namespaces can be created on that load balancer. + ServiceNamespaceSelector *metav1.LabelSelector `json:"serviceNamespaceSelector" yaml:"serviceNamespaceSelector"` + + // Nodes matching this selector will be preferentially added to the load balancers that + // they match selectors for. NodeSelector does not override primaryAgentPool for node allocation. + NodeSelector *metav1.LabelSelector `json:"nodeSelector" yaml:"nodeSelector"` +} + +// MultipleStandardLoadBalancerConfigurationStatus stores the properties regarding multiple standard load balancers. 
+type MultipleStandardLoadBalancerConfigurationStatus struct { + ActiveServices sets.Set[string] `json:"activeServices" yaml:"activeServices"` } type InitSecretConfig struct { @@ -347,16 +402,16 @@ type Cloud struct { eventRecorder record.EventRecorder routeUpdater *delayedRouteUpdater - vmCache *azcache.TimedCache - lbCache *azcache.TimedCache - nsgCache *azcache.TimedCache - rtCache *azcache.TimedCache + vmCache azcache.Resource + lbCache azcache.Resource + nsgCache azcache.Resource + rtCache azcache.Resource // public ip cache // key: [resourceGroupName] // Value: sync.Map of [pipName]*PublicIPAddress - pipCache *azcache.TimedCache + pipCache azcache.Resource // use LB frontEndIpConfiguration ID as the key and search for PLS attached to the frontEnd - plsCache *azcache.TimedCache + plsCache azcache.Resource // Add service lister to always get latest service serviceLister corelisters.ServiceLister @@ -365,6 +420,8 @@ type Cloud struct { *ManagedDiskController *controllerCommon + + multipleStandardLoadBalancerConfigurationsSynced bool } // NewCloud returns a Cloud with initialized clients @@ -538,6 +595,12 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, } } + if az.useMultipleStandardLoadBalancers() { + if err := az.checkEnableMultipleStandardLoadBalancers(); err != nil { + return err + } + } + env, err := ratelimitconfig.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem) if err != nil { return err @@ -653,6 +716,22 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, return nil } +func (az *Cloud) useMultipleStandardLoadBalancers() bool { + return az.useStandardLoadBalancer() && len(az.MultipleStandardLoadBalancerConfigurations) > 0 +} + +func (az *Cloud) useSingleStandardLoadBalancer() bool { + return az.useStandardLoadBalancer() && len(az.MultipleStandardLoadBalancerConfigurations) == 0 +} + +// Multiple standard load balancer mode only supports IP-based load balancers. +func (az *Cloud) checkEnableMultipleStandardLoadBalancers() error { + if az.isLBBackendPoolTypeNodeIPConfig() { + return fmt.Errorf("multiple standard load balancers cannot be used with backend pool type %s", consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) + } + return nil +} + func (az *Cloud) isLBBackendPoolTypeNodeIPConfig() bool { return strings.EqualFold(az.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) } @@ -666,6 +745,10 @@ func (az *Cloud) getPutVMSSVMBatchSize() int { } func (az *Cloud) initCaches() (err error) { + if az.Config.DisableAPICallCache { + klog.Infof("API call cache is disabled, ignore logs about cache operations") + } + az.vmCache, err = az.newVMCache() if err != nil { return err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go index c351c1054..29cca3ee1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go @@ -17,30 +17,10 @@ limitations under the License. 
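
The multiple-standard-load-balancer support above pairs with the GetLoadBalancerConfigurationsNames helper added in consts/helpers.go: a Service selects eligible load balancer configurations through the comma-separated service.beta.kubernetes.io/azure-load-balancer-configurations annotation, and the helper trims and lower-cases each name. A small usage sketch, not part of the patch; the annotation value is invented for illustration.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
)

func main() {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				// Hypothetical value: two configuration names with mixed case and spacing.
				consts.ServiceAnnotationLoadBalancerConfigurations: "LB-A, lb-b",
			},
		},
	}
	// GetLoadBalancerConfigurationsNames splits the value on "," and trims/lower-cases each name.
	fmt.Println(consts.GetLoadBalancerConfigurationsNames(svc)) // [lb-a lb-b]
}
```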
package provider import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" "regexp" - "strings" - "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - cloudprovider "k8s.io/cloud-provider" - "k8s.io/klog/v2" - "k8s.io/utils/pointer" - - azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" - "sigs.k8s.io/cloud-provider-azure/pkg/consts" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) var ( @@ -67,641 +47,3 @@ func (az *Cloud) Event(obj runtime.Object, eventType, reason, message string) { az.eventRecorder.Event(obj, eventType, reason, message) } } - -// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry -func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt azcache.AzureCacheReadType) (compute.VirtualMachine, error) { - var machine compute.VirtualMachine - var retryErr error - err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - machine, retryErr = az.getVirtualMachine(name, crt) - if errors.Is(retryErr, cloudprovider.InstanceNotFound) { - return true, cloudprovider.InstanceNotFound - } - if retryErr != nil { - klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr) - return false, nil - } - klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name) - return true, nil - }) - if errors.Is(err, wait.ErrWaitTimeout) { - err = retryErr - } - return machine, err -} - -// ListVirtualMachines invokes az.VirtualMachinesClient.List with exponential backoff retry -func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMachine, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - allNodes, rerr := az.VirtualMachinesClient.List(ctx, resourceGroup) - if rerr != nil { - klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, rerr) - return nil, rerr.Error() - } - klog.V(6).Infof("VirtualMachinesClient.List(%v) success", resourceGroup) - return allNodes, nil -} - -// getPrivateIPsForMachine is wrapper for optional backoff getting private ips -// list of a node by name -func (az *Cloud) getPrivateIPsForMachine(nodeName types.NodeName) ([]string, error) { - return az.getPrivateIPsForMachineWithRetry(nodeName) -} - -func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]string, error) { - var privateIPs []string - err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - var retryErr error - privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName)) - if retryErr != nil { - // won't retry since the instance doesn't exist on Azure. 
- if errors.Is(retryErr, cloudprovider.InstanceNotFound) { - return true, retryErr - } - klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry,err=%v", nodeName, retryErr) - return false, nil - } - klog.V(3).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName) - return true, nil - }) - return privateIPs, err -} - -func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) { - return az.GetIPForMachineWithRetry(nodeName) -} - -// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry -func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) { - var ip, publicIP string - err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - var retryErr error - ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name)) - if retryErr != nil { - klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) - return false, nil - } - klog.V(3).Infof("GetIPForMachineWithRetry(%s): backoff success", name) - return true, nil - }) - return ip, publicIP, err -} - -// CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, pointer.StringDeref(sg.Etag, "")) - klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.nsgCache.Delete(*sg.Name) - return nil - } - - nsgJSON, _ := json.Marshal(sg) - klog.Warningf("CreateOrUpdateSecurityGroup(%s) failed: %v, NSG request: %s", pointer.StringDeref(sg.Name, ""), rerr.Error(), string(nsgJSON)) - - // Invalidate the cache because ETAG precondition mismatch. - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("SecurityGroup cache for %s is cleanup because of http.StatusPreconditionFailed", *sg.Name) - _ = az.nsgCache.Delete(*sg.Name) - } - - // Invalidate the cache because another new operation has canceled the current request. 
- if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("SecurityGroup cache for %s is cleanup because CreateOrUpdateSecurityGroup is canceled by another operation", *sg.Name) - _ = az.nsgCache.Delete(*sg.Name) - } - - return rerr.Error() -} - -func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer { - if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil { - return *lb - } - - frontendIPConfigurations := *lb.FrontendIPConfigurations - for i := range frontendIPConfigurations { - config := frontendIPConfigurations[i] - if config.FrontendIPConfigurationPropertiesFormat != nil && - config.Subnet != nil && - config.Subnet.ID != nil { - subnet := network.Subnet{ - ID: config.Subnet.ID, - } - if config.Subnet.Name != nil { - subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name - } - config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet - frontendIPConfigurations[i] = config - continue - } - } - - lb.FrontendIPConfigurations = &frontendIPConfigurations - return *lb -} - -// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - lb = cleanupSubnetInFrontendIPConfigurations(&lb) - - rgName := az.getLoadBalancerResourceGroup() - rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, ""), lb, pointer.StringDeref(lb.Etag, "")) - klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.lbCache.Delete(*lb.Name) - return nil - } - - lbJSON, _ := json.Marshal(lb) - klog.Warningf("LoadBalancerClient.CreateOrUpdate(%s) failed: %v, LoadBalancer request: %s", pointer.StringDeref(lb.Name, ""), rerr.Error(), string(lbJSON)) - - // Invalidate the cache because ETAG precondition mismatch. - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", pointer.StringDeref(lb.Name, "")) - _ = az.lbCache.Delete(*lb.Name) - } - - retryErrorMessage := rerr.Error().Error() - // Invalidate the cache because another new operation has canceled the current request. 
- if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", pointer.StringDeref(lb.Name, "")) - _ = az.lbCache.Delete(*lb.Name) - } - - // The LB update may fail because the referenced PIP is not in the Succeeded provisioning state - if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(consts.ReferencedResourceNotProvisionedMessageCode)) { - matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage) - if len(matches) != 3 { - klog.Errorf("Failed to parse the retry error message %s", retryErrorMessage) - return rerr.Error() - } - pipRG, pipName := matches[1], matches[2] - klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, pointer.StringDeref(lb.Name, "")) - pip, _, err := az.getPublicIPAddress(pipRG, pipName, azcache.CacheReadTypeDefault) - if err != nil { - klog.Errorf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err) - return rerr.Error() - } - // Perform a dummy update to fix the provisioning state - err = az.CreateOrUpdatePIP(service, pipRG, pip) - if err != nil { - klog.Errorf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err) - return rerr.Error() - } - // Invalidate the LB cache, return the error, and the controller manager - // would retry the LB update in the next reconcile loop - _ = az.lbCache.Delete(*lb.Name) - } - - return rerr.Error() -} - -func (az *Cloud) CreateOrUpdateLBBackendPool(lbName string, backendPool network.BackendAddressPool) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - klog.V(4).Infof("CreateOrUpdateLBBackendPool: updating backend pool %s in LB %s", pointer.StringDeref(backendPool.Name, ""), lbName) - rerr := az.LoadBalancerClient.CreateOrUpdateBackendPools(ctx, az.getLoadBalancerResourceGroup(), lbName, pointer.StringDeref(backendPool.Name, ""), backendPool, pointer.StringDeref(backendPool.Etag, "")) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.lbCache.Delete(lbName) - return nil - } - - // Invalidate the cache because ETAG precondition mismatch. - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) - _ = az.lbCache.Delete(lbName) - } - - retryErrorMessage := rerr.Error().Error() - // Invalidate the cache because another new operation has canceled the current request. - if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) - _ = az.lbCache.Delete(lbName) - } - - return rerr.Error() -} - -func (az *Cloud) DeleteLBBackendPool(lbName, backendPoolName string) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - klog.V(4).Infof("DeleteLBBackendPool: deleting backend pool %s in LB %s", backendPoolName, lbName) - rerr := az.LoadBalancerClient.DeleteLBBackendPool(ctx, az.getLoadBalancerResourceGroup(), lbName, backendPoolName) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.lbCache.Delete(lbName) - return nil - } - - // Invalidate the cache because ETAG precondition mismatch. 
- if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) - _ = az.lbCache.Delete(lbName) - } - - retryErrorMessage := rerr.Error().Error() - // Invalidate the cache because another new operation has canceled the current request. - if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) - _ = az.lbCache.Delete(lbName) - } - - return rerr.Error() -} - -// ListManagedLBs invokes az.LoadBalancerClient.List and filter out -// those that are not managed by cloud provider azure or not associated to a managed VMSet. -func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterName string) ([]network.LoadBalancer, error) { - allLBs, err := az.ListLB(service) - if err != nil { - return nil, err - } - - if allLBs == nil { - klog.Warningf("ListManagedLBs: no LBs found") - return nil, nil - } - - // return early if wantLb=false - if nodes == nil { - klog.V(4).Infof("ListManagedLBs: return all LBs in the resource group %s, including unmanaged LBs", az.getLoadBalancerResourceGroup()) - return allLBs, nil - } - - agentPoolLBs := make([]network.LoadBalancer, 0) - agentPoolVMSetNames, err := az.VMSet.GetAgentPoolVMSetNames(nodes) - if err != nil { - return nil, fmt.Errorf("ListManagedLBs: failed to get agent pool vmSet names: %w", err) - } - - agentPoolVMSetNamesSet := sets.New[string]() - if agentPoolVMSetNames != nil && len(*agentPoolVMSetNames) > 0 { - for _, vmSetName := range *agentPoolVMSetNames { - klog.V(6).Infof("ListManagedLBs: found agent pool vmSet name %s", vmSetName) - agentPoolVMSetNamesSet.Insert(strings.ToLower(vmSetName)) - } - } - - for _, lb := range allLBs { - vmSetNameFromLBName := az.mapLoadBalancerNameToVMSet(pointer.StringDeref(lb.Name, ""), clusterName) - if strings.EqualFold(strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), clusterName) || - agentPoolVMSetNamesSet.Has(strings.ToLower(vmSetNameFromLBName)) { - agentPoolLBs = append(agentPoolLBs, lb) - klog.V(4).Infof("ListManagedLBs: found agent pool LB %s", pointer.StringDeref(lb.Name, "")) - } - } - - return agentPoolLBs, nil -} - -// ListLB invokes az.LoadBalancerClient.List with exponential backoff retry -func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - rgName := az.getLoadBalancerResourceGroup() - allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName) - if rerr != nil { - if rerr.IsNotFound() { - return nil, nil - } - az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error()) - klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr) - return nil, rerr.Error() - } - klog.V(2).Infof("LoadBalancerClient.List(%v) success", rgName) - return allLBs, nil -} - -// CreateOrUpdatePIP invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, pointer.StringDeref(pip.Name, ""), pip) - klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, pointer.StringDeref(pip.Name, "")) - 
if rerr == nil { - // Invalidate the cache right after updating - _ = az.pipCache.Delete(pipResourceGroup) - return nil - } - - pipJSON, _ := json.Marshal(pip) - klog.Warningf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s, PublicIP request: %s", pipResourceGroup, pointer.StringDeref(pip.Name, ""), rerr.Error().Error(), string(pipJSON)) - az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error()) - - // Invalidate the cache because ETAG precondition mismatch. - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("PublicIP cache for (%s, %s) is cleanup because of http.StatusPreconditionFailed", pipResourceGroup, pointer.StringDeref(pip.Name, "")) - _ = az.pipCache.Delete(pipResourceGroup) - } - - retryErrorMessage := rerr.Error().Error() - // Invalidate the cache because another new operation has canceled the current request. - if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("PublicIP cache for (%s, %s) is cleanup because CreateOrUpdate is canceled by another operation", pipResourceGroup, pointer.StringDeref(pip.Name, "")) - _ = az.pipCache.Delete(pipResourceGroup) - } - - return rerr.Error() -} - -// CreateOrUpdateInterface invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateInterface(service *v1.Service, nic network.Interface) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic) - klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name) - if rerr != nil { - klog.Errorf("InterfacesClient.CreateOrUpdate(%s) failed: %s", *nic.Name, rerr.Error().Error()) - az.Event(service, v1.EventTypeWarning, "CreateOrUpdateInterface", rerr.Error().Error()) - return rerr.Error() - } - - return nil -} - -// DeletePublicIP invokes az.PublicIPAddressesClient.Delete with exponential backoff retry -func (az *Cloud) DeletePublicIP(service *v1.Service, pipResourceGroup string, pipName string) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName) - if rerr != nil { - klog.Errorf("PublicIPAddressesClient.Delete(%s) failed: %s", pipName, rerr.Error().Error()) - az.Event(service, v1.EventTypeWarning, "DeletePublicIPAddress", rerr.Error().Error()) - - if strings.Contains(rerr.Error().Error(), consts.CannotDeletePublicIPErrorMessageCode) { - klog.Warningf("DeletePublicIP for public IP %s failed with error %v, this is because other resources are referencing the public IP. 
The deletion of the service will continue.", pipName, rerr.Error()) - return nil - } - return rerr.Error() - } - - // Invalidate the cache right after deleting - _ = az.pipCache.Delete(pipResourceGroup) - return nil -} - -// DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry -func (az *Cloud) DeleteLB(service *v1.Service, lbName string) *retry.Error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rgName := az.getLoadBalancerResourceGroup() - rerr := az.LoadBalancerClient.Delete(ctx, rgName, lbName) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.lbCache.Delete(lbName) - return nil - } - - klog.Errorf("LoadBalancerClient.Delete(%s) failed: %s", lbName, rerr.Error().Error()) - az.Event(service, v1.EventTypeWarning, "DeleteLoadBalancer", rerr.Error().Error()) - return rerr -} - -// CreateOrUpdateRouteTable invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, pointer.StringDeref(routeTable.Etag, "")) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.rtCache.Delete(*routeTable.Name) - return nil - } - - rtJSON, _ := json.Marshal(routeTable) - klog.Warningf("RouteTablesClient.CreateOrUpdate(%s) failed: %v, RouteTable request: %s", pointer.StringDeref(routeTable.Name, ""), rerr.Error(), string(rtJSON)) - - // Invalidate the cache because etag mismatch. - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("Route table cache for %s is cleanup because of http.StatusPreconditionFailed", *routeTable.Name) - _ = az.rtCache.Delete(*routeTable.Name) - } - // Invalidate the cache because another new operation has canceled the current request. - if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name) - _ = az.rtCache.Delete(*routeTable.Name) - } - klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error()) - return rerr.Error() -} - -// CreateOrUpdateRoute invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateRoute(route network.Route) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, pointer.StringDeref(route.Etag, "")) - klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) - if rerr == nil { - _ = az.rtCache.Delete(az.RouteTableName) - return nil - } - - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("Route cache for %s is cleanup because of http.StatusPreconditionFailed", *route.Name) - _ = az.rtCache.Delete(az.RouteTableName) - } - // Invalidate the cache because another new operation has canceled the current request. 
- if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *route.Name) - _ = az.rtCache.Delete(az.RouteTableName) - } - return rerr.Error() -} - -// DeleteRouteWithName invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) DeleteRouteWithName(routeName string) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.RoutesClient.Delete(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeName) - klog.V(10).Infof("RoutesClient.Delete(%s,%s): end", az.RouteTableName, routeName) - if rerr == nil { - return nil - } - - klog.Errorf("RoutesClient.Delete(%s, %s) failed: %v", az.RouteTableName, routeName, rerr.Error()) - return rerr.Error() -} - -// CreateOrUpdateVMSS invokes az.VirtualMachineScaleSetsClient.Update(). -func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { - ctx, cancel := getContextWithCancel() - defer cancel() - - // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. - // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated") - vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName) - if rerr != nil { - klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr) - return rerr - } - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { - klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName) - return nil - } - - rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters) - klog.V(10).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName) - if rerr != nil { - klog.Errorf("CreateOrUpdateVMSS: error CreateOrUpdate vmss(%s): %v", VMScaleSetName, rerr) - return rerr - } - - return nil -} - -func (az *Cloud) CreateOrUpdatePLS(service *v1.Service, pls network.PrivateLinkService) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.PrivateLinkServiceClient.CreateOrUpdate(ctx, az.PrivateLinkServiceResourceGroup, pointer.StringDeref(pls.Name, ""), pls, pointer.StringDeref(pls.Etag, "")) - if rerr == nil { - // Invalidate the cache right after updating - _ = az.plsCache.Delete(pointer.StringDeref((*pls.LoadBalancerFrontendIPConfigurations)[0].ID, "")) - return nil - } - - rtJSON, _ := json.Marshal(pls) - klog.Warningf("PrivateLinkServiceClient.CreateOrUpdate(%s) failed: %v, PrivateLinkService request: %s", pointer.StringDeref(pls.Name, ""), rerr.Error(), string(rtJSON)) - - // Invalidate the cache because etag mismatch. - if rerr.HTTPStatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("Private link service cache for %s is cleanup because of http.StatusPreconditionFailed", pointer.StringDeref(pls.Name, "")) - _ = az.plsCache.Delete(pointer.StringDeref((*pls.LoadBalancerFrontendIPConfigurations)[0].ID, "")) - } - // Invalidate the cache because another new operation has canceled the current request. 
- if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("Private link service for %s is cleanup because CreateOrUpdatePrivateLinkService is canceled by another operation", pointer.StringDeref(pls.Name, "")) - _ = az.plsCache.Delete(pointer.StringDeref((*pls.LoadBalancerFrontendIPConfigurations)[0].ID, "")) - } - klog.Errorf("PrivateLinkServiceClient.CreateOrUpdate(%s) failed: %v", pointer.StringDeref(pls.Name, ""), rerr.Error()) - return rerr.Error() -} - -// DeletePLS invokes az.PrivateLinkServiceClient.Delete with exponential backoff retry -func (az *Cloud) DeletePLS(service *v1.Service, plsName string, plsLBFrontendID string) *retry.Error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.PrivateLinkServiceClient.Delete(ctx, az.PrivateLinkServiceResourceGroup, plsName) - if rerr == nil { - // Invalidate the cache right after deleting - _ = az.plsCache.Delete(plsLBFrontendID) - return nil - } - - klog.Errorf("PrivateLinkServiceClient.DeletePLS(%s) failed: %s", plsName, rerr.Error().Error()) - az.Event(service, v1.EventTypeWarning, "DeletePrivateLinkService", rerr.Error().Error()) - return rerr -} - -// DeletePEConn invokes az.PrivateLinkServiceClient.DeletePEConnection with exponential backoff retry -func (az *Cloud) DeletePEConn(service *v1.Service, plsName string, peConnName string) *retry.Error { - ctx, cancel := getContextWithCancel() - defer cancel() - - rerr := az.PrivateLinkServiceClient.DeletePEConnection(ctx, az.PrivateLinkServiceResourceGroup, plsName, peConnName) - if rerr == nil { - return nil - } - - klog.Errorf("PrivateLinkServiceClient.DeletePEConnection(%s-%s) failed: %s", plsName, peConnName, rerr.Error().Error()) - az.Event(service, v1.EventTypeWarning, "DeletePrivateEndpointConnection", rerr.Error().Error()) - return rerr -} - -// CreateOrUpdateSubnet invokes az.SubnetClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateSubnet(service *v1.Service, subnet network.Subnet) error { - ctx, cancel := getContextWithCancel() - defer cancel() - - var rg string - if len(az.VnetResourceGroup) > 0 { - rg = az.VnetResourceGroup - } else { - rg = az.ResourceGroup - } - - rerr := az.SubnetsClient.CreateOrUpdate(ctx, rg, az.VnetName, *subnet.Name, subnet) - klog.V(10).Infof("SubnetClient.CreateOrUpdate(%s): end", *subnet.Name) - if rerr != nil { - klog.Errorf("SubnetClient.CreateOrUpdate(%s) failed: %s", *subnet.Name, rerr.Error().Error()) - az.Event(service, v1.EventTypeWarning, "CreateOrUpdateSubnet", rerr.Error().Error()) - return rerr.Error() - } - - return nil -} - -// MigrateToIPBasedBackendPoolAndWaitForCompletion use the migration API to migrate from -// NIC-based to IP-based LB backend pools. It also makes sure the number of IP addresses -// in the backend pools is expected. 
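// Illustrative sketch (editorial aside, not part of the vendored diff): the migration helper
// documented above waits for the NIC-to-IP backend pool migration to settle by polling every
// 5 seconds for up to 10 minutes until each backend pool reports the expected number of IP
// addresses. The minimal, self-contained version below shows the same wait.PollImmediate
// pattern; expectedCounts and currentIPCount are hypothetical stand-ins for the real
// nicsCountMap and GetLBBackendPool/countIPsOnBackendPool calls.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForBackendPoolIPs polls currentIPCount until every pool in expectedCounts
// reaches its expected IP count, or the 10-minute timeout expires.
func waitForBackendPoolIPs(expectedCounts map[string]int, currentIPCount func(pool string) (int, error)) error {
	succeeded := make(map[string]bool, len(expectedCounts))
	return wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
		for pool, want := range expectedCounts {
			if succeeded[pool] {
				continue // this pool already reached the expected count
			}
			got, err := currentIPCount(pool)
			if err != nil {
				return false, fmt.Errorf("failed to count IPs in backend pool %s: %w", pool, err)
			}
			if got != want {
				return false, nil // not done yet, poll again on the next tick
			}
			succeeded[pool] = true
		}
		return true, nil
	})
}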
-func (az *Cloud) MigrateToIPBasedBackendPoolAndWaitForCompletion( - lbName string, backendPoolNames []string, nicsCountMap map[string]int, -) error { - if rerr := az.LoadBalancerClient.MigrateToIPBasedBackendPool(context.Background(), az.ResourceGroup, lbName, backendPoolNames); rerr != nil { - backendPoolNamesStr := strings.Join(backendPoolNames, ",") - klog.Errorf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Failed to migrate to IP based backend pool for lb %s, backend pool %s: %s", lbName, backendPoolNamesStr, rerr.Error().Error()) - return rerr.Error() - } - - succeeded := make(map[string]bool) - for bpName := range nicsCountMap { - succeeded[bpName] = false - } - - err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (done bool, err error) { - for bpName, nicsCount := range nicsCountMap { - if succeeded[bpName] { - continue - } - - bp, rerr := az.LoadBalancerClient.GetLBBackendPool(context.Background(), az.ResourceGroup, lbName, bpName, "") - if rerr != nil { - klog.Errorf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Failed to get backend pool %s for lb %s: %s", bpName, lbName, rerr.Error().Error()) - return false, rerr.Error() - } - - if countIPsOnBackendPool(bp) != nicsCount { - klog.V(4).Infof("MigrateToIPBasedBackendPoolAndWaitForCompletion: Expected IPs %s, current IPs %d, will retry in 5s", nicsCount, countIPsOnBackendPool(bp)) - return false, nil - } - succeeded[bpName] = true - } - return true, nil - }) - - if err != nil { - if errors.Is(err, wait.ErrWaitTimeout) { - klog.Warningf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Timeout waiting for migration to IP based backend pool for lb %s, backend pool %s", lbName, strings.Join(backendPoolNames, ",")) - return nil - } - - klog.Errorf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Failed to wait for migration to IP based backend pool for lb %s, backend pool %s: %s", lbName, strings.Join(backendPoolNames, ","), err.Error()) - return err - } - - return nil -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index 59705bac9..9702a8956 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -727,7 +727,8 @@ func vmUpdateRequired(future *azure.Future, err error) bool { func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (compute.CreationData, error) { if options.SourceResourceID == "" { return compute.CreationData{ - CreateOption: compute.Empty, + CreateOption: compute.Empty, + PerformancePlus: options.PerformancePlus, }, nil } @@ -744,7 +745,8 @@ func getValidCreationData(subscriptionID, resourceGroup string, options *Managed } default: return compute.CreationData{ - CreateOption: compute.Empty, + CreateOption: compute.Empty, + PerformancePlus: options.PerformancePlus, }, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go index 38505d943..c79fd6395 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go @@ -102,7 +102,7 @@ type LoadBalancerMetadata struct { // InstanceMetadataService knows how to query the Azure instance metadata server. 
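// Illustrative sketch (editorial aside, not part of the vendored diff): InstanceMetadataService
// queries the Azure Instance Metadata Service (IMDS) over plain HTTP and caches the result via
// the azcache resource changed in the hunk below. The endpoint path and api-version used here
// are illustrative assumptions; the provider's actual getMetadata implementation is not shown
// in this diff.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// queryIMDS issues a metadata request against an IMDS server such as "http://169.254.169.254".
func queryIMDS(imdsServer string) ([]byte, error) {
	url := fmt.Sprintf("%s/metadata/instance?api-version=2021-02-01", imdsServer)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// IMDS rejects requests that do not carry this header.
	req.Header.Set("Metadata", "true")

	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected IMDS status: %s", resp.Status)
	}
	return io.ReadAll(resp.Body)
}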
type InstanceMetadataService struct { imdsServer string - imsCache *azcache.TimedCache + imsCache azcache.Resource } // NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object. @@ -111,7 +111,7 @@ func NewInstanceMetadataService(imdsServer string) (*InstanceMetadataService, er imdsServer: imdsServer, } - imsCache, err := azcache.NewTimedcache(consts.MetadataCacheTTL, ims.getMetadata) + imsCache, err := azcache.NewTimedCache(consts.MetadataCacheTTL, ims.getMetadata, false) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_interface_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_interface_repo.go new file mode 100644 index 000000000..0cfee7e96 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_interface_repo.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +// CreateOrUpdateInterface invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry +func (az *Cloud) CreateOrUpdateInterface(service *v1.Service, nic network.Interface) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic) + klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name) + if rerr != nil { + klog.Errorf("InterfacesClient.CreateOrUpdate(%s) failed: %s", *nic.Name, rerr.Error().Error()) + az.Event(service, v1.EventTypeWarning, "CreateOrUpdateInterface", rerr.Error().Error()) + return rerr.Error() + } + + return nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index 663fb0d45..d1cc4b3b7 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -33,6 +33,8 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" @@ -66,10 +68,10 @@ func (az *Cloud) existsPip(clusterName string, service *v1.Service) bool { return existingPip } - if v4Enabled && !existsPipSingleStack(false) { + if v4Enabled && !existsPipSingleStack(consts.IPVersionIPv4) { return false } - if v6Enabled && !existsPipSingleStack(true) { + if v6Enabled && !existsPipSingleStack(consts.IPVersionIPv6) { return false } return true @@ -83,13 +85,13 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic return nil, az.existsPip(clusterName, service), err } - _, status, existsLb, err := 
az.getServiceLoadBalancer(service, clusterName, nil, false, existingLBs) + _, status, _, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, existingLBs) if err != nil || existsLb { return status, existsLb || az.existsPip(clusterName, service), err } flippedService := flipServiceInternalAnnotation(service) - _, status, existsLb, err = az.getServiceLoadBalancer(flippedService, clusterName, nil, false, existingLBs) + _, status, _, existsLb, err = az.getServiceLoadBalancer(flippedService, clusterName, nil, false, existingLBs) if err != nil || existsLb { return status, existsLb || az.existsPip(clusterName, service), err } @@ -124,7 +126,7 @@ func (az *Cloud) reconcileService(ctx context.Context, clusterName string, servi resourceBaseName := az.GetLoadBalancerName(context.TODO(), "", service) klog.V(2).Infof("reconcileService: Start reconciling Service %q with its resource basename %q", serviceName, resourceBaseName) - lbStatus, fipConfig, err := az.getServiceLoadBalancerStatus(service, lb) + lbStatus, lbIPsPrimaryPIPs, fipConfigs, err := az.getServiceLoadBalancerStatus(service, lb) if err != nil { klog.Errorf("getServiceLoadBalancerStatus(%s) failed: %v", serviceName, err) if !errors.Is(err, ErrorNotVmssInstance) { @@ -132,25 +134,21 @@ func (az *Cloud) reconcileService(ctx context.Context, clusterName string, servi } } - var serviceIP *string - if lbStatus != nil && len(lbStatus.Ingress) > 0 { - serviceIP = &lbStatus.Ingress[0].IP - } - - klog.V(2).Infof("reconcileService: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP)) - if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, lb.Name, true /* wantLb */); err != nil { + serviceIPs := lbIPsPrimaryPIPs + klog.V(2).Infof("reconcileService: reconciling security group for service %q with IPs %q, wantLb = true", serviceName, serviceIPs) + if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPs, lb.Name, true /* wantLb */); err != nil { klog.Errorf("reconcileSecurityGroup(%s) failed: %#v", serviceName, err) return nil, err } - if fipConfig != nil { + for _, fipConfig := range fipConfigs { if err := az.reconcilePrivateLinkService(clusterName, service, fipConfig, true /* wantPLS */); err != nil { klog.Errorf("reconcilePrivateLinkService(%s) failed: %#v", serviceName, err) return nil, err } } - updateService := updateServiceLoadBalancerIP(service, pointer.StringDeref(serviceIP, "")) + updateService := updateServiceLoadBalancerIPs(service, lbIPsPrimaryPIPs) flippedService := flipServiceInternalAnnotation(updateService) if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil { klog.Errorf("reconcileLoadBalancer(%s) failed: %#v", serviceName, err) @@ -278,13 +276,13 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri klog.V(5).InfoS("EnsureLoadBalancerDeleted Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err) }() - serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service) + _, _, lbIPsPrimaryPIPs, _, err := az.getServiceLoadBalancer(service, clusterName, nil, false, []network.LoadBalancer{}) if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) { return err } - - klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) - _, err = az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, nil, 
false /* wantLb */) + serviceIPsToCleanup := lbIPsPrimaryPIPs + klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IPs %q, wantLb = false", serviceName, serviceIPsToCleanup) + _, err = az.reconcileSecurityGroup(clusterName, service, &serviceIPsToCleanup, nil, false /* wantLb */) if err != nil { return err } @@ -300,8 +298,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri return err } - _, err = az.reconcilePublicIPs(clusterName, service, "", false /* wantLb */) - if err != nil { + if _, err = az.reconcilePublicIPs(clusterName, service, "", false /* wantLb */); err != nil { return err } @@ -327,7 +324,18 @@ func (az *Cloud) getLoadBalancerResourceGroup() string { // shouldChangeLoadBalancer determines if the load balancer of the service should be switched to another one // according to the mode annotation on the service. This could be happened when the LB selection mode of an // existing service is changed to another VMSS/VMAS. -func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clusterName string) bool { +func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clusterName, expectedLBName string) bool { + // if using the single standard load balancer, the current LB should be kept + if az.useSingleStandardLoadBalancer() { + return false + } + + if az.useMultipleStandardLoadBalancers() { + klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one %s", service.Name, currLBName, clusterName, expectedLBName) + return currLBName != expectedLBName + } + + // basic LB hasMode, isAuto, vmSetName := az.getServiceLoadBalancerMode(service) // if no mode is given or the mode is `__auto__`, the current LB should be kept @@ -335,11 +343,6 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust return false } - // if using the single standard load balancer, the current LB should be kept - if az.useStandardLoadBalancer() { - return false - } - lbName := strings.TrimSuffix(currLBName, consts.InternalLoadBalancerNameSuffix) // change the LB from vmSet dedicated to primary if the vmSet becomes the primary one if strings.EqualFold(lbName, vmSetName) { @@ -360,15 +363,17 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust return true } -func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadBalancer, existingLBs []network.LoadBalancer, fip *network.FrontendIPConfiguration, clusterName string, service *v1.Service) error { +func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadBalancer, existingLBs []network.LoadBalancer, fips []*network.FrontendIPConfiguration, clusterName string, service *v1.Service) error { if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil { return nil } fipConfigs := *lb.FrontendIPConfigurations for i, fipConfig := range fipConfigs { - if strings.EqualFold(pointer.StringDeref(fipConfig.Name, ""), pointer.StringDeref(fip.Name, "")) { - fipConfigs = append(fipConfigs[:i], fipConfigs[i+1:]...) - break + for _, fip := range fips { + if strings.EqualFold(pointer.StringDeref(fipConfig.Name, ""), pointer.StringDeref(fip.Name, "")) { + fipConfigs = append(fipConfigs[:i], fipConfigs[i+1:]...) 
+ break + } } } lb.FrontendIPConfigurations = &fipConfigs @@ -377,8 +382,10 @@ func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadB if lb.LoadBalancingRules != nil { lbRules := *lb.LoadBalancingRules for i := len(lbRules) - 1; i >= 0; i-- { - if strings.Contains(pointer.StringDeref(lbRules[i].Name, ""), pointer.StringDeref(fip.Name, "")) { - lbRules = append(lbRules[:i], lbRules[i+1:]...) + for _, fip := range fips { + if strings.Contains(pointer.StringDeref(lbRules[i].Name, ""), pointer.StringDeref(fip.Name, "")) { + lbRules = append(lbRules[:i], lbRules[i+1:]...) + } } } lb.LoadBalancingRules = &lbRules @@ -386,32 +393,41 @@ func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadB if lb.Probes != nil { lbProbes := *lb.Probes for i := len(lbProbes) - 1; i >= 0; i-- { - if strings.Contains(pointer.StringDeref(lbProbes[i].Name, ""), pointer.StringDeref(fip.Name, "")) { - lbProbes = append(lbProbes[:i], lbProbes[i+1:]...) + for _, fip := range fips { + if strings.Contains(pointer.StringDeref(lbProbes[i].Name, ""), pointer.StringDeref(fip.Name, "")) { + lbProbes = append(lbProbes[:i], lbProbes[i+1:]...) + } } } lb.Probes = &lbProbes } - // clean up any private link service associated with the frontEndIPConfig - err := az.reconcilePrivateLinkService(clusterName, service, fip, false /* wantPLS */) - if err != nil { - klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to clean up PLS: %v", pointer.StringDeref(lb.Name, ""), pointer.StringDeref(fip.Name, ""), clusterName, service.Name, err) - return err + // PLS does not support IPv6 so there will not be additional API calls. + for _, fip := range fips { + // clean up any private link service associated with the frontEndIPConfig + if err := az.reconcilePrivateLinkService(clusterName, service, fip, false /* wantPLS */); err != nil { + klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to clean up PLS: %v", pointer.StringDeref(lb.Name, ""), pointer.StringDeref(fip.Name, ""), clusterName, service.Name, err) + return err + } } + fipNames := []string{} + for _, fip := range fips { + fipNames = append(fipNames, pointer.StringDeref(fip.Name, "")) + } + logPrefix := fmt.Sprintf("removeFrontendIPConfigurationFromLoadBalancer(%s, %q, %s, %s)", pointer.StringDeref(lb.Name, ""), fipNames, clusterName, service.Name) if len(fipConfigs) == 0 { - klog.V(2).Infof("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): deleting load balancer because there is no remaining frontend IP configurations", pointer.StringDeref(lb.Name, ""), pointer.StringDeref(fip.Name, ""), clusterName, service.Name) + klog.V(2).Infof("%s: deleting load balancer because there is no remaining frontend IP configurations", logPrefix) err := az.cleanOrphanedLoadBalancer(lb, existingLBs, service, clusterName) if err != nil { - klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to cleanupOrphanedLoadBalancer: %v", pointer.StringDeref(lb.Name, ""), pointer.StringDeref(fip.Name, ""), clusterName, service.Name, err) + klog.Errorf("%s: failed to cleanupOrphanedLoadBalancer: %v", logPrefix, err) return err } } else { - klog.V(2).Infof("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): updating the load balancer", pointer.StringDeref(lb.Name, ""), pointer.StringDeref(fip.Name, ""), clusterName, service.Name) + klog.V(2).Infof("%s: updating the load balancer", logPrefix) err := az.CreateOrUpdateLB(service, *lb) if err != nil { - 
klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to CreateOrUpdateLB: %v", pointer.StringDeref(lb.Name, ""), pointer.StringDeref(fip.Name, ""), clusterName, service.Name, err) + klog.Errorf("%s: failed to CreateOrUpdateLB: %v", logPrefix, err) return err } _ = az.lbCache.Delete(pointer.StringDeref(lb.Name, "")) @@ -423,9 +439,15 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs lbName := pointer.StringDeref(lb.Name, "") serviceName := getServiceName(service) isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service) - isIPv6 := utilnet.IsIPv6String(service.Spec.ClusterIP) - lbBackendPoolName := getBackendPoolName(clusterName, isIPv6) - lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName) + v4Enabled, v6Enabled := getIPFamiliesEnabled(service) + lbBackendPoolIDs := az.getBackendPoolIDs(clusterName, lbName) + lbBackendPoolIDsToDelete := []string{} + if v4Enabled { + lbBackendPoolIDsToDelete = append(lbBackendPoolIDsToDelete, lbBackendPoolIDs[consts.IPVersionIPv4]) + } + if v6Enabled { + lbBackendPoolIDsToDelete = append(lbBackendPoolIDsToDelete, lbBackendPoolIDs[consts.IPVersionIPv6]) + } if isBackendPoolPreConfigured { klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): ignore cleanup of dirty lb because the lb is pre-configured", lbName, serviceName, clusterName) } else { @@ -452,8 +474,7 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs lb.BackendAddressPools = nil } - deleteErr := az.safeDeleteLoadBalancer(*lb, clusterName, vmSetName, service) - if deleteErr != nil { + if deleteErr := az.safeDeleteLoadBalancer(*lb, clusterName, vmSetName, service); deleteErr != nil { klog.Warningf("cleanOrphanedLoadBalancer(%s, %s, %s): failed to DeleteLB: %v", lbName, serviceName, clusterName, deleteErr) rgName, vmssName, parseErr := retry.GetVMSSMetadataByRawError(deleteErr) @@ -477,14 +498,12 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs } vmssNamesMap := map[string]bool{vmssName: true} - err := az.VMSet.EnsureBackendPoolDeletedFromVMSets(vmssNamesMap, []string{lbBackendPoolID}) - if err != nil { + if err := az.VMSet.EnsureBackendPoolDeletedFromVMSets(vmssNamesMap, lbBackendPoolIDsToDelete); err != nil { klog.Errorf("cleanOrphanedLoadBalancer(%s, %s, %s): failed to EnsureBackendPoolDeletedFromVMSets: %v", lbName, serviceName, clusterName, err) return err } - deleteErr := az.DeleteLB(service, lbName) - if deleteErr != nil { + if deleteErr := az.DeleteLB(service, lbName); deleteErr != nil { klog.Errorf("cleanOrphanedLoadBalancer(%s, %s, %s): failed delete lb for the second time, stop retrying: %v", lbName, serviceName, clusterName, deleteErr) return deleteErr.Error() } @@ -496,16 +515,21 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs // safeDeleteLoadBalancer deletes the load balancer after decoupling it from the vmSet func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vmSetName string, service *v1.Service) *retry.Error { - isIPv6 := utilnet.IsIPv6String(service.Spec.ClusterIP) - lbBackendPoolID := az.getBackendPoolID(pointer.StringDeref(lb.Name, ""), getBackendPoolName(clusterName, isIPv6)) - _, err := az.VMSet.EnsureBackendPoolDeleted(service, []string{lbBackendPoolID}, vmSetName, lb.BackendAddressPools, true) - if err != nil { + lbBackendPoolIDs := az.getBackendPoolIDs(clusterName, pointer.StringDeref(lb.Name, "")) + lbBackendPoolIDsToDelete := []string{} + v4Enabled, 
v6Enabled := getIPFamiliesEnabled(service) + if v4Enabled { + lbBackendPoolIDsToDelete = append(lbBackendPoolIDsToDelete, lbBackendPoolIDs[consts.IPVersionIPv4]) + } + if v6Enabled { + lbBackendPoolIDsToDelete = append(lbBackendPoolIDsToDelete, lbBackendPoolIDs[consts.IPVersionIPv6]) + } + if _, err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolIDsToDelete, vmSetName, lb.BackendAddressPools, true); err != nil { return retry.NewError(false, fmt.Errorf("safeDeleteLoadBalancer: failed to EnsureBackendPoolDeleted: %w", err)) } klog.V(2).Infof("safeDeleteLoadBalancer: deleting LB %s", pointer.StringDeref(lb.Name, "")) - rerr := az.DeleteLB(service, pointer.StringDeref(lb.Name, "")) - if rerr != nil { + if rerr := az.DeleteLB(service, pointer.StringDeref(lb.Name, "")); rerr != nil { return rerr } _ = az.lbCache.Delete(pointer.StringDeref(lb.Name, "")) @@ -518,17 +542,20 @@ func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vm // In case the selected load balancer does not exist it returns network.LoadBalancer struct // with added metadata (such as name, location) and existsLB set to FALSE. // By default - cluster default LB is returned. -func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool, existingLBs []network.LoadBalancer) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) { +func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool, existingLBs []network.LoadBalancer) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, lbIPsPrimaryPIPs []string, exists bool, err error) { isInternal := requiresInternalLoadBalancer(service) var defaultLB *network.LoadBalancer primaryVMSetName := az.VMSet.GetPrimaryVMSetName() - defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal) + defaultLBName, err := az.getAzureLoadBalancerName(service, &existingLBs, clusterName, primaryVMSetName, isInternal) + if err != nil { + return nil, nil, nil, false, err + } // reuse the lb list from reconcileSharedLoadBalancer to reduce the api call if len(existingLBs) == 0 { existingLBs, err = az.ListLB(service) if err != nil { - return nil, nil, false, err + return nil, nil, nil, false, err } } @@ -542,28 +569,38 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, if isInternalLoadBalancer(&existingLB) != isInternal { continue } - var fipConfig *network.FrontendIPConfiguration - status, fipConfig, err = az.getServiceLoadBalancerStatus(service, &existingLB) + + var fipConfigs []*network.FrontendIPConfiguration + status, lbIPsPrimaryPIPs, fipConfigs, err = az.getServiceLoadBalancerStatus(service, &existingLB) if err != nil { - return nil, nil, false, err + return nil, nil, nil, false, err } if status == nil { // service is not on this load balancer continue } - klog.V(4).Infof("getServiceLoadBalancer(%s, %s, %v): current lb ip: %s", service.Name, clusterName, wantLb, status.Ingress[0].IP) + klog.V(4).Infof("getServiceLoadBalancer(%s, %s, %v): current lb IPs: %q", service.Name, clusterName, wantLb, lbIPsPrimaryPIPs) // select another load balancer instead of returning // the current one if the change is needed - if wantLb && az.shouldChangeLoadBalancer(service, pointer.StringDeref(existingLB.Name, ""), clusterName) { - if err := az.removeFrontendIPConfigurationFromLoadBalancer(&existingLB, existingLBs, fipConfig, clusterName, service); err != nil { - 
klog.Errorf("getServiceLoadBalancer(%s, %s, %v): failed to remove frontend IP configuration from load balancer: %v", service.Name, clusterName, wantLb, err) - return nil, nil, false, err + if wantLb && az.shouldChangeLoadBalancer(service, pointer.StringDeref(existingLB.Name, ""), clusterName, defaultLBName) { + fipConfigNames := []string{} + for _, fipConfig := range fipConfigs { + fipConfigNames = append(fipConfigNames, pointer.StringDeref(fipConfig.Name, "")) } + if err := az.removeFrontendIPConfigurationFromLoadBalancer(&existingLB, existingLBs, fipConfigs, clusterName, service); err != nil { + klog.Errorf("getServiceLoadBalancer(%s, %s, %v): failed to remove frontend IP configurations %q from load balancer: %v", service.Name, clusterName, wantLb, fipConfigNames, err) + return nil, nil, nil, false, err + } + az.reconcileMultipleStandardLoadBalancerConfigurationStatus( + false, + getServiceName(service), + pointer.StringDeref(existingLB.Name, ""), + ) break } - return &existingLB, status, true, nil + return &existingLB, status, lbIPsPrimaryPIPs, true, nil } // Service does not have a load balancer, select one. @@ -573,10 +610,10 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, // select new load balancer for service selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes) if err != nil { - return nil, nil, false, err + return nil, nil, nil, false, err } - return selectedLB, status, exists, err + return selectedLB, status, lbIPsPrimaryPIPs, exists, err } // create a default LB with meta data if not present @@ -599,13 +636,14 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, } } - return defaultLB, nil, false, nil + return defaultLB, nil, nil, false, nil } // selectLoadBalancer selects load balancer for the service in the cluster. // The selection algorithm selects the load balancer which currently has // the minimum lb rules. If there are multiple LBs with same number of rules, // then selects the first one (sorted based on name). +// Note: this function is only useful for basic LB clusters. func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) @@ -623,7 +661,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi } selectedLBRuleCount := math.MaxInt32 for _, currVMSetName := range *vmSetNames { - currLBName := az.getAzureLoadBalancerName(clusterName, currVMSetName, isInternal) + currLBName, _ := az.getAzureLoadBalancerName(service, existingLBs, clusterName, currVMSetName, isInternal) lb, exists := mapExistingLBs[currLBName] if !exists { // select this LB as this is a new LB and will have minimum rules @@ -676,19 +714,25 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi return selectedLB, existsLb, nil } -func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, fipConfig *network.FrontendIPConfiguration, err error) { +// getServiceLoadBalancerStatus returns LB status for the Service. +// Before DualStack support, old logic takes the first ingress IP as non-additional one +// and the second one as additional one. With DualStack support, the second IP may be +// the IP of another IP family so the new logic returns two variables. 
+func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, lbIPsPrimaryPIPs []string, fipConfigs []*network.FrontendIPConfiguration, err error) { if lb == nil { klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") - return nil, nil, nil + return nil, nil, nil, nil } if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 { klog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil") - return nil, nil, nil + return nil, nil, nil, nil } + isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - for _, ipConfiguration := range *lb.FrontendIPConfigurations { - ipConfiguration := ipConfiguration + lbIngresses := []v1.LoadBalancerIngress{} + for i := range *lb.FrontendIPConfigurations { + ipConfiguration := (*lb.FrontendIPConfigurations)[i] owns, isPrimaryService, _ := az.serviceOwnsFrontendIP(ipConfiguration, service) if owns { klog.V(2).Infof("get(%s): lb(%s) - found frontend IP config, primary service: %v", serviceName, pointer.StringDeref(lb.Name, ""), isPrimaryService) @@ -698,19 +742,19 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L lbIP = ipConfiguration.PrivateIPAddress } else { if ipConfiguration.PublicIPAddress == nil { - return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress is Nil", serviceName, *lb.Name) + return nil, nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress is Nil", serviceName, *lb.Name) } pipID := ipConfiguration.PublicIPAddress.ID if pipID == nil { - return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name) + return nil, nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name) } pipName, err := getLastSegment(*pipID, "/") if err != nil { - return nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID) + return nil, nil, nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID) } pip, existsPip, err := az.getPublicIPAddress(az.getPublicIPAddressResourceGroup(service), pipName, azcache.CacheReadTypeDefault) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if existsPip { lbIP = pip.IPAddress @@ -719,25 +763,28 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", pointer.StringDeref(lbIP, ""), pointer.StringDeref(ipConfiguration.Name, ""), serviceName) - // set additional public IPs to LoadBalancerStatus, so that kube-proxy would create their iptables rules. 
- lbIngress := []v1.LoadBalancerIngress{{IP: pointer.StringDeref(lbIP, "")}} - additionalIPs, err := getServiceAdditionalPublicIPs(service) - if err != nil { - return &v1.LoadBalancerStatus{Ingress: lbIngress}, &ipConfiguration, err - } - if len(additionalIPs) > 0 { - for _, pip := range additionalIPs { - lbIngress = append(lbIngress, v1.LoadBalancerIngress{ - IP: pip, - }) - } - } - - return &v1.LoadBalancerStatus{Ingress: lbIngress}, &ipConfiguration, nil + lbIngresses = append(lbIngresses, v1.LoadBalancerIngress{IP: pointer.StringDeref(lbIP, "")}) + lbIPsPrimaryPIPs = append(lbIPsPrimaryPIPs, pointer.StringDeref(lbIP, "")) + fipConfigs = append(fipConfigs, &ipConfiguration) } } + if len(lbIngresses) == 0 { + return nil, nil, nil, nil + } - return nil, nil, nil + // set additional public IPs to LoadBalancerStatus, so that kube-proxy would create their iptables rules. + additionalIPs, err := getServiceAdditionalPublicIPs(service) + if err != nil { + return &v1.LoadBalancerStatus{Ingress: lbIngresses}, lbIPsPrimaryPIPs, fipConfigs, err + } + if len(additionalIPs) > 0 { + for _, pip := range additionalIPs { + lbIngresses = append(lbIngresses, v1.LoadBalancerIngress{ + IP: pip, + }) + } + } + return &v1.LoadBalancerStatus{Ingress: lbIngresses}, lbIPsPrimaryPIPs, fipConfigs, nil } func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service, isIPv6 bool) (string, bool, error) { @@ -822,41 +869,16 @@ func flipServiceInternalAnnotation(service *v1.Service) *v1.Service { return copyService } -func updateServiceLoadBalancerIP(service *v1.Service, serviceIP string) *v1.Service { +func updateServiceLoadBalancerIPs(service *v1.Service, serviceIPs []string) *v1.Service { copyService := service.DeepCopy() - if len(serviceIP) > 0 && copyService != nil { - setServiceLoadBalancerIP(copyService, serviceIP) + if copyService != nil { + for _, serviceIP := range serviceIPs { + setServiceLoadBalancerIP(copyService, serviceIP) + } } return copyService } -func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service) (string, error) { - isIPv6 := utilnet.IsIPv6String(service.Spec.ClusterIP) - lbIP := getServiceLoadBalancerIP(service, isIPv6) - if len(lbIP) > 0 { - return lbIP, nil - } - - if len(service.Status.LoadBalancer.Ingress) > 0 && len(service.Status.LoadBalancer.Ingress[0].IP) > 0 { - return service.Status.LoadBalancer.Ingress[0].IP, nil - } - - _, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false, []network.LoadBalancer{}) - if err != nil { - return "", err - } - if !existsLb { - klog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name) - return "", nil - } - if len(lbStatus.Ingress) < 1 { - klog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. 
Assuming it has been removed", service.Name) - return "", nil - } - - return lbStatus.Ingress[0].IP, nil -} - func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel, clusterName string, shouldPIPExisted, foundDNSLabelAnnotation, isIPv6 bool) (*network.PublicIPAddress, error) { pipResourceGroup := az.getPublicIPAddressResourceGroup(service) pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName, azcache.CacheReadTypeDefault) @@ -920,7 +942,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } } else { if shouldPIPExisted { - return nil, fmt.Errorf("PublicIP from annotation azure-pip-name=%s for service %s doesn't exist", pipName, serviceName) + return nil, fmt.Errorf("PublicIP from annotation azure-pip-name(-IPv6)=%s for service %s doesn't exist", pipName, serviceName) } changed = true @@ -1260,7 +1282,7 @@ func (az *Cloud) isFrontendIPChanged( subnetName := getInternalSubnet(service) if subnetName != nil { if subnet == nil { - return false, fmt.Errorf("isFrontendIPChanged: Unexpected nil subnet") + return false, fmt.Errorf("isFrontendIPChanged: Unexpected nil subnet %q", pointer.StringDeref(subnetName, "")) } if config.Subnet != nil && !strings.EqualFold(pointer.StringDeref(config.Subnet.ID, ""), pointer.StringDeref(subnet.ID, "")) { return true, nil @@ -1420,6 +1442,85 @@ func (az *Cloud) findFrontendIPConfigsOfService( return fipsOfServiceMap, nil } +// reconcileMultipleStandardLoadBalancerConfigurations runs only once every time the +// cloud controller manager restarts or reloads itself. It checks all existing +// load balancer typed services and adds service names to the ActiveServices queue +// of the corresponding load balancer configuration. It also checks if there is a configuration +// named <clusterName>. If not, an error will be reported.
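// Illustrative sketch (editorial aside, not part of the vendored diff): each multiple-standard-LB
// configuration tracks the Service names currently placed on that load balancer in a string set,
// which the function below seeds on startup by matching load balancing rule name prefixes against
// the existing LoadBalancer-type Services. The local lbConfig type is a simplified stand-in for
// the provider's MultipleStandardLoadBalancerConfigurations entries.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

type lbConfig struct {
	Name           string
	ActiveServices sets.Set[string]
}

// markActive records that svcName is served by the configuration named lbName.
func markActive(cfgs []lbConfig, lbName, svcName string) {
	for i := range cfgs {
		if cfgs[i].Name == lbName {
			if cfgs[i].ActiveServices == nil {
				cfgs[i].ActiveServices = sets.New[string]()
			}
			cfgs[i].ActiveServices.Insert(svcName)
		}
	}
}

func main() {
	cfgs := []lbConfig{{Name: "kubernetes"}, {Name: "lb-2"}}
	markActive(cfgs, "kubernetes", "default/my-svc")
	fmt.Println(cfgs[0].ActiveServices.UnsortedList()) // [default/my-svc]
}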
+func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations(clusterName string, existingLBs *[]network.LoadBalancer) (err error) { + if !az.useMultipleStandardLoadBalancers() { + return nil + } + + if az.multipleStandardLoadBalancerConfigurationsSynced { + return nil + } + defer func() { + if err == nil { + az.multipleStandardLoadBalancerConfigurationsSynced = true + } + }() + + var found bool + for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { + if strings.EqualFold(multiSLBConfig.Name, clusterName) { + found = true + break + } + } + if !found { + return fmt.Errorf("multiple standard load balancers are enabled but no configuration named %q is found", clusterName) + } + + svcs, err := az.KubeClient.CoreV1().Services("").List(context.Background(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("reconcileMultipleStandardLoadBalancerConfigurations: failed to list all load balancer services: %w", err) + return fmt.Errorf("failed to list all load balancer services: %w", err) + } + rulePrefixToSVCNameMap := make(map[string]string) + for _, svc := range svcs.Items { + svc := svc + if strings.EqualFold(string(svc.Spec.Type), string(v1.ServiceTypeLoadBalancer)) { + prefix := az.GetLoadBalancerName(context.Background(), "", &svc) + svcName := getServiceName(&svc) + rulePrefixToSVCNameMap[strings.ToLower(prefix)] = svcName + klog.V(2).Infof("reconcileMultipleStandardLoadBalancerConfigurations: found service %q with prefix %q", svcName, prefix) + } + } + + for _, existingLB := range *existingLBs { + lbName := pointer.StringDeref(existingLB.Name, "") + if existingLB.LoadBalancerPropertiesFormat != nil && + existingLB.LoadBalancingRules != nil { + for _, rule := range *existingLB.LoadBalancingRules { + ruleName := pointer.StringDeref(rule.Name, "") + rulePrefix := strings.Split(ruleName, "-")[0] + if rulePrefix == "" { + klog.Warningf("reconcileMultipleStandardLoadBalancerConfigurations: the load balancing rule name %s is not in the correct format", ruleName) + } + svcName, ok := rulePrefixToSVCNameMap[strings.ToLower(rulePrefix)] + if ok { + klog.V(2).Infof( + "reconcileMultipleStandardLoadBalancerConfigurations: found load balancer %q with rule %q of service %q", + lbName, ruleName, svcName, + ) + for i := range az.MultipleStandardLoadBalancerConfigurations { + if strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name) { + if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() + } + klog.V(2).Infof("reconcileMultipleStandardLoadBalancerConfigurations: service(%s) is active on lb(%s)", svcName, lbName) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Insert(svcName) + } + } + } + } + } + } + + return nil +} + // reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup. // This also reconciles the Service's Ports with the LoadBalancer config. // This entails adding rules/probes for expected Ports and removing stale rules/ports. 
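// Illustrative sketch (editorial aside, not part of the vendored diff): throughout this refactor,
// per-family resources are keyed by a bool meaning "is IPv6", with consts.IPVersionIPv4 and
// consts.IPVersionIPv6 as the named false/true values (see the frontend IP configuration IDs in
// the hunk below). The constants and resource IDs here are local stand-ins for illustration.
package main

import "fmt"

const (
	ipVersionIPv4 = false // stand-in for consts.IPVersionIPv4
	ipVersionIPv6 = true  // stand-in for consts.IPVersionIPv6
)

func main() {
	// One frontend IP configuration ID per IP family, as reconcileLoadBalancer builds them.
	lbFrontendIPConfigIDs := map[bool]string{
		ipVersionIPv4: "/subscriptions/.../frontendIPConfigurations/asvc",
		ipVersionIPv6: "/subscriptions/.../frontendIPConfigurations/asvc-IPv6",
	}
	for isIPv6, id := range lbFrontendIPConfigIDs {
		fmt.Printf("isIPv6=%v -> %s\n", isIPv6, id)
	}
}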
@@ -1434,7 +1535,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, return nil, fmt.Errorf("reconcileLoadBalancer: failed to list managed LB: %w", err) } - lb, lbStatus, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb, existingLBs) + if err := az.reconcileMultipleStandardLoadBalancerConfigurations(clusterName, &existingLBs); err != nil { + klog.Errorf("reconcileLoadBalancer: failed to reconcile multiple standard load balancer configurations: %s", err.Error()) + return nil, err + } + + lb, lbStatus, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb, existingLBs) if err != nil { klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) return nil, err @@ -1447,8 +1553,8 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, serviceName, lbResourceGroup, lbName, wantLb) lbFrontendIPConfigNames := az.getFrontendIPConfigNames(service) lbFrontendIPConfigIDs := map[bool]string{ - false: az.getFrontendIPConfigID(lbName, lbFrontendIPConfigNames[false]), - true: az.getFrontendIPConfigID(lbName, lbFrontendIPConfigNames[true]), + consts.IPVersionIPv4: az.getFrontendIPConfigID(lbName, lbFrontendIPConfigNames[consts.IPVersionIPv4]), + consts.IPVersionIPv6: az.getFrontendIPConfigID(lbName, lbFrontendIPConfigNames[consts.IPVersionIPv6]), } dirtyLb := false @@ -1475,11 +1581,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } // reconcile the load balancer's frontend IP configurations. - ownedFIPConfigs, toDeleteConfigs, changed, err := az.reconcileFrontendIPConfigs(clusterName, service, lb, lbStatus, wantLb, lbFrontendIPConfigNames) + ownedFIPConfigs, toDeleteConfigs, fipChanged, err := az.reconcileFrontendIPConfigs(clusterName, service, lb, lbStatus, wantLb, lbFrontendIPConfigNames) if err != nil { return lb, err } - if changed { + if fipChanged { dirtyLb = true } @@ -1522,7 +1628,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if err = az.checkLoadBalancerResourcesConflicts(lb, lbFrontendIPConfigIDs[false], service); err != nil { return nil, err } - if err := getExpectedLBRule(false); err != nil { + if err := getExpectedLBRule(consts.IPVersionIPv4); err != nil { return nil, err } } @@ -1530,7 +1636,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if err = az.checkLoadBalancerResourcesConflicts(lb, lbFrontendIPConfigIDs[true], service); err != nil { return nil, err } - if err := getExpectedLBRule(true); err != nil { + if err := getExpectedLBRule(consts.IPVersionIPv6); err != nil { return nil, err } } @@ -1542,7 +1648,6 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if changed := az.reconcileLBRules(lb, service, serviceName, wantLb, expectedRules); changed { dirtyLb = true } - if changed := az.ensureLoadBalancerTagged(lb); changed { dirtyLb = true } @@ -1614,10 +1719,34 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } } + if fipChanged { + az.reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb, serviceName, lbName) + } + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) return lb, nil } +func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb bool, svcName, lbName string) { + lbName = strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix) + for i := range 
az.MultipleStandardLoadBalancerConfigurations { + if strings.EqualFold(lbName, az.MultipleStandardLoadBalancerConfigurations[i].Name) { + if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() + } + + if wantLb { + klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is active on lb(%s)", svcName, lbName) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Insert(svcName) + } else { + klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is not active on lb(%s) any more", svcName, lbName) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Delete(svcName) + } + break + } + } +} + func (az *Cloud) reconcileLBProbes(lb *network.LoadBalancer, service *v1.Service, serviceName string, wantLb bool, expectedProbes []network.Probe) bool { // remove unwanted probes dirtyProbes := false @@ -1821,7 +1950,8 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, } addNewFIPOfService := func(isIPv6 bool) error { - klog.V(4).Infof("ensure(%s): lb(%s) - creating a new frontend IP config (isIPv6=%t)", serviceName, lbName, isIPv6) + klog.V(4).Infof("ensure(%s): lb(%s) - creating a new frontend IP config %q (isIPv6=%t)", + serviceName, lbName, lbFrontendIPConfigNames[isIPv6], isIPv6) // construct FrontendIPConfigurationPropertiesFormat var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat @@ -2440,7 +2570,7 @@ func (az *Cloud) getExpectedLoadBalancingRulePropertiesForPort( // Azure ILB does not support secondary IPs as floating IPs on the LB. Therefore, floating IP needs to be turned // off and the rule should point to the nodeIP:nodePort. - if consts.IsK8sServiceInternalIPv6(service) { + if consts.IsK8sServiceUsingInternalLoadBalancer(service) && isBackendPoolIPv6(lbBackendPoolID) { props.BackendPort = pointer.Int32(servicePort.NodePort) props.EnableFloatingIP = pointer.Bool(false) } @@ -2462,7 +2592,7 @@ func (az *Cloud) getExpectedHAModeLoadBalancingRuleProperties( // This reconciles the Network Security Group similar to how the LB is reconciled. // This entails adding required, missing SecurityRules and removing stale rules. 
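// Illustrative sketch (editorial aside, not part of the vendored diff): the dual-stack NSG
// reconciliation below groups load balancer and additional IPs per family in a map[bool][]string
// (key true = IPv6), classifying each address with net.ParseIP(ip).To4(), and falls back to "*"
// for a family that has no destination address.
package main

import (
	"fmt"
	"net"
)

func groupByFamily(ips []string) map[bool][]string {
	grouped := map[bool][]string{}
	for _, ip := range ips {
		isIPv6 := net.ParseIP(ip).To4() == nil // nil for IPv6 (and for unparsable input)
		grouped[isIPv6] = append(grouped[isIPv6], ip)
	}
	for _, isIPv6 := range []bool{false, true} {
		if len(grouped[isIPv6]) == 0 {
			grouped[isIPv6] = []string{"*"} // match-all when the family has no LB IP
		}
	}
	return grouped
}

func main() {
	fmt.Println(groupByFamily([]string{"20.1.2.3", "2603:1030::5"}))
}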
-func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, lbName *string, wantLb bool) (*network.SecurityGroup, error) { +func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIPs *[]string, lbName *string, wantLb bool) (*network.SecurityGroup, error) { serviceName := getServiceName(service) klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName) @@ -2480,16 +2610,26 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, return nil, err } - destinationIPAddress := "" - if wantLb && lbIP == nil { + if wantLb && lbIPs == nil { return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name) } - if lbIP != nil { - destinationIPAddress = *lbIP + + destinationIPAddresses := map[bool][]string{} + if lbIPs != nil { + for _, ip := range *lbIPs { + if net.ParseIP(ip).To4() != nil { + destinationIPAddresses[false] = append(destinationIPAddresses[false], ip) + } else { + destinationIPAddresses[true] = append(destinationIPAddresses[true], ip) + } + } } - if destinationIPAddress == "" { - destinationIPAddress = "*" + if len(destinationIPAddresses[false]) == 0 { + destinationIPAddresses[false] = []string{"*"} + } + if len(destinationIPAddresses[true]) == 0 { + destinationIPAddresses[true] = []string{"*"} } disableFloatingIP := false @@ -2497,7 +2637,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, disableFloatingIP = true } - backendIPAddresses := make([]string, 0) + backendIPAddresses := map[bool][]string{} if wantLb && disableFloatingIP { lb, exist, err := az.getAzureLoadBalancer(pointer.StringDeref(lbName, ""), azcache.CacheReadTypeDefault) if err != nil { @@ -2506,21 +2646,18 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, if !exist { return nil, fmt.Errorf("unable to get lb %s", pointer.StringDeref(lbName, "")) } - backendPrivateIPv4s, backendPrivateIPv6s := az.LoadBalancerBackendPool.GetBackendPrivateIPs(clusterName, service, lb) - backendIPAddresses = backendPrivateIPv4s - if utilnet.IsIPv6String(*lbIP) { - backendIPAddresses = backendPrivateIPv6s - } + backendIPAddresses[false], backendIPAddresses[true] = az.LoadBalancerBackendPool.GetBackendPrivateIPs(clusterName, service, lb) } additionalIPs, err := getServiceAdditionalPublicIPs(service) if err != nil { return nil, fmt.Errorf("unable to get additional public IPs, error=%w", err) } - - destinationIPAddresses := []string{destinationIPAddress} - if destinationIPAddress != "*" { - destinationIPAddresses = append(destinationIPAddresses, additionalIPs...) 
+ for _, ip := range additionalIPs { + isIPv6 := net.ParseIP(ip).To4() == nil + if len(destinationIPAddresses[isIPv6]) != 1 || destinationIPAddresses[isIPv6][0] != "*" { + destinationIPAddresses[isIPv6] = append(destinationIPAddresses[isIPv6], ip) + } } sourceRanges, err := servicehelpers.GetLoadBalancerSourceRanges(service) @@ -2532,21 +2669,40 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, delete(sourceRanges, consts.DefaultLoadBalancerSourceRanges) } - var sourceAddressPrefixes []string + sourceAddressPrefixes := map[bool][]string{} if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 { if !requiresInternalLoadBalancer(service) || len(service.Spec.LoadBalancerSourceRanges) > 0 { - sourceAddressPrefixes = []string{"Internet"} + sourceAddressPrefixes[false] = []string{"Internet"} + sourceAddressPrefixes[true] = []string{"Internet"} } } else { for _, ip := range sourceRanges { - sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String()) + if ip == nil { + continue + } + isIPv6 := net.ParseIP(ip.IP.String()).To4() == nil + sourceAddressPrefixes[isIPv6] = append(sourceAddressPrefixes[isIPv6], ip.String()) } - sourceAddressPrefixes = append(sourceAddressPrefixes, serviceTags...) + sourceAddressPrefixes[false] = append(sourceAddressPrefixes[false], serviceTags...) + sourceAddressPrefixes[true] = append(sourceAddressPrefixes[true], serviceTags...) } - expectedSecurityRules, err := az.getExpectedSecurityRules(wantLb, ports, sourceAddressPrefixes, service, destinationIPAddresses, sourceRanges, backendIPAddresses, disableFloatingIP) - if err != nil { - return nil, err + expectedSecurityRules := []network.SecurityRule{} + handleSecurityRules := func(isIPv6 bool) error { + expectedSecurityRulesSingleStack, err := az.getExpectedSecurityRules(wantLb, ports, sourceAddressPrefixes[isIPv6], service, destinationIPAddresses[isIPv6], sourceRanges, backendIPAddresses[isIPv6], disableFloatingIP, isIPv6) + expectedSecurityRules = append(expectedSecurityRules, expectedSecurityRulesSingleStack...) 
+ return err + } + v4Enabled, v6Enabled := getIPFamiliesEnabled(service) + if v4Enabled { + if err := handleSecurityRules(false); err != nil { + return nil, err + } + } + if v6Enabled { + if err := handleSecurityRules(true); err != nil { + return nil, err + } } // update security rules @@ -2575,7 +2731,14 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, return &sg, nil } -func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, service *v1.Service, serviceName string, wantLb bool, expectedSecurityRules []network.SecurityRule, ports []v1.ServicePort, sourceAddressPrefixes []string, destinationIPAddresses []string) (bool, []network.SecurityRule, error) { +func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, + service *v1.Service, + serviceName string, + wantLb bool, + expectedSecurityRules []network.SecurityRule, + ports []v1.ServicePort, + sourceAddressPrefixes, destinationIPAddresses map[bool][]string, +) (bool, []network.SecurityRule, error) { dirtySg := false var updatedRules []network.SecurityRule if sg.SecurityGroupPropertiesFormat != nil && sg.SecurityGroupPropertiesFormat.SecurityRules != nil { @@ -2607,47 +2770,58 @@ func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, service *v1.Se // update security rules: if the service uses a shared rule and is being deleted, // then remove it from the shared rule - if useSharedSecurityRule(service) && !wantLb { - for _, port := range ports { - for _, sourceAddressPrefix := range sourceAddressPrefixes { - sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix, utilnet.IsIPv6String(service.Spec.ClusterIP)) - sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) - if !sharedRuleFound { - klog.V(4).Infof("Didn't find shared rule %s for service %s", sharedRuleName, service.Name) - continue - } - shouldDeleteNSGRule := false - if sharedRule.DestinationAddressPrefixes == nil || len(*sharedRule.DestinationAddressPrefixes) == 0 { - shouldDeleteNSGRule = true - } else { - existingPrefixes := *sharedRule.DestinationAddressPrefixes - for _, destinationIPAddress := range destinationIPAddresses { - addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) - if !found { - klog.Warningf("Didn't find destination address %v in shared rule %s for service %s", destinationIPAddress, sharedRuleName, service.Name) - continue - } - if len(existingPrefixes) == 1 { - shouldDeleteNSGRule = true - break //shared nsg rule has only one entry and entry owned by deleted svc has been found. skip the rest of the entries - } else { - newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...) 
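The expected security rules are now generated once per enabled IP family, gated by getIPFamiliesEnabled(service). That helper is not part of this diff; the sketch below is an assumption of how such a check can be derived from Service.Spec.IPFamilies (the real helper may also fall back to ClusterIPs):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// ipFamiliesEnabled is an illustrative stand-in for getIPFamiliesEnabled:
// it reports which address families the Service requests.
func ipFamiliesEnabled(svc *v1.Service) (v4 bool, v6 bool) {
	for _, fam := range svc.Spec.IPFamilies {
		switch fam {
		case v1.IPv4Protocol:
			v4 = true
		case v1.IPv6Protocol:
			v6 = true
		}
	}
	return v4, v6
}

func main() {
	svc := &v1.Service{}
	svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}
	fmt.Println(ipFamiliesEnabled(svc)) // true true
}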
- sharedRule.DestinationAddressPrefixes = &newDestinations - updatedRules[sharedIndex] = sharedRule + handleRule := func(isIPv6 bool) { + if useSharedSecurityRule(service) && !wantLb { + for _, port := range ports { + for _, sourceAddressPrefix := range sourceAddressPrefixes[isIPv6] { + sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix, isIPv6) + sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) + if !sharedRuleFound { + klog.V(4).Infof("Didn't find shared rule %s for service %s", sharedRuleName, service.Name) + continue + } + shouldDeleteNSGRule := false + if sharedRule.SecurityRulePropertiesFormat == nil || + sharedRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes == nil || + len(*sharedRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes) == 0 { + shouldDeleteNSGRule = true + } else { + existingPrefixes := *sharedRule.DestinationAddressPrefixes + for _, destinationIPAddress := range destinationIPAddresses[isIPv6] { + addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) + if !found { + klog.Warningf("Didn't find destination address %v in shared rule %s for service %s", destinationIPAddress, sharedRuleName, service.Name) + continue + } + if len(existingPrefixes) == 1 { + shouldDeleteNSGRule = true + break //shared nsg rule has only one entry and entry owned by deleted svc has been found. skip the rest of the entries + } else { + newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...) + sharedRule.DestinationAddressPrefixes = &newDestinations + updatedRules[sharedIndex] = sharedRule + } + dirtySg = true } - dirtySg = true } - } - if shouldDeleteNSGRule { - klog.V(4).Infof("shared rule will be deleted because last service %s which refers this rule is deleted.", service.Name) - updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...) - dirtySg = true - continue + if shouldDeleteNSGRule { + klog.V(4).Infof("shared rule will be deleted because last service %s which refers this rule is deleted.", service.Name) + updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...) 
+ dirtySg = true + continue + } } } } } + v4Enabled, v6Enabled := getIPFamiliesEnabled(service) + if v4Enabled { + handleRule(consts.IPVersionIPv4) + } + if v6Enabled { + handleRule(consts.IPVersionIPv6) + } // update security rules: prepare rules for consolidation for index, rule := range updatedRules { @@ -2695,10 +2869,11 @@ func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, service *v1.Se for _, r := range updatedRules { klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) } + return dirtySg, updatedRules, nil } -func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, sourceAddressPrefixes []string, service *v1.Service, destinationIPAddresses []string, sourceRanges utilnet.IPNetSet, backendIPAddresses []string, disableFloatingIP bool) ([]network.SecurityRule, error) { +func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, sourceAddressPrefixes []string, service *v1.Service, destinationIPAddresses []string, sourceRanges utilnet.IPNetSet, backendIPAddresses []string, disableFloatingIP, isIPv6 bool) ([]network.SecurityRule, error) { expectedSecurityRules := []network.SecurityRule{} if wantLb { @@ -2715,7 +2890,7 @@ func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, s } for j := range sourceAddressPrefixes { ix := i*len(sourceAddressPrefixes) + j - securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j], utilnet.IsIPv6String(service.Spec.ClusterIP)) + securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j], isIPv6) nsgRule := network.SecurityRule{ Name: pointer.String(securityRuleName), SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ @@ -2752,7 +2927,7 @@ func (az *Cloud) getExpectedSecurityRules(wantLb bool, ports []v1.ServicePort, s if err != nil { return nil, err } - securityRuleName := az.getSecurityRuleName(service, port, "deny_all", utilnet.IsIPv6String(service.Spec.ClusterIP)) + securityRuleName := az.getSecurityRuleName(service, port, "deny_all", isIPv6) nsgRule := network.SecurityRule{ Name: pointer.String(securityRuleName), SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ @@ -2787,7 +2962,7 @@ func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Servic return false, fmt.Errorf("shouldUpdateLoadBalancer: failed to list managed load balancers: %w", err) } - _, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nodes, false, existingManagedLBs) + _, _, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nodes, false, existingManagedLBs) return existsLb && service.ObjectMeta.DeletionTimestamp == nil && service.Spec.Type == v1.ServiceTypeLoadBalancer, nil } @@ -3160,6 +3335,9 @@ func (az *Cloud) getPublicIPUpdates( } } + if pip.Name == nil { + return false, nil, false, nil, fmt.Errorf("PIP name is empty: %v", pip) + } pipName := *pip.Name // If we've been told to use a specific public ip by the client, let's track whether or not it actually existed @@ -3725,11 +3903,227 @@ func (az *Cloud) ensureSecurityGroupTagged(sg *network.SecurityGroup) bool { return changed } -// stringSlice returns a string slice value for the passed string slice pointer. It returns a nil -// slice if the pointer is nil. 
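The shared-rule cleanup above removes this service's destination from a shared NSG rule and deletes the rule outright when that destination was its last remaining entry. A small stand-alone sketch of that decision (removeDestination is illustrative only, operating on a plain string slice instead of *[]string):

package main

import "fmt"

// removeDestination drops one destination prefix from a shared rule and
// reports that the whole rule should be deleted when the matched entry was
// the only one left, mirroring the shouldDeleteNSGRule logic above.
func removeDestination(prefixes []string, dest string) (remaining []string, deleteRule bool) {
	for i, p := range prefixes {
		if p != dest {
			continue
		}
		if len(prefixes) == 1 {
			return nil, true
		}
		return append(prefixes[:i], prefixes[i+1:]...), false
	}
	return prefixes, false
}

func main() {
	fmt.Println(removeDestination([]string{"20.0.0.1", "20.0.0.2"}, "20.0.0.1")) // [20.0.0.2] false
	fmt.Println(removeDestination([]string{"20.0.0.1"}, "20.0.0.1"))             // [] true
}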
-func stringSlice(s *[]string) []string { - if s != nil { - return *s +// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress. +// Thus Azure do not allow mixed type (public and internal) load balancer. +// So we'd have a separate name for internal load balancer. +// This would be the name for Azure LoadBalancer resource. +func (az *Cloud) getAzureLoadBalancerName( + service *v1.Service, + existingLBs *[]network.LoadBalancer, + clusterName, vmSetName string, + isInternal bool, +) (string, error) { + if az.LoadBalancerName != "" { + clusterName = az.LoadBalancerName + } + lbNamePrefix := vmSetName + // The LB name prefix is set to the name of the cluster when: + // 1. the LB belongs to the primary agent pool. + // 2. using the single SLB. + if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useSingleStandardLoadBalancer() { + lbNamePrefix = clusterName + } + + // For multiple standard load balancers scenario: + // 1. Filter out the eligible load balancers. + // 2. Choose the most eligible load balancer. + if az.useMultipleStandardLoadBalancers() { + eligibleLBs, err := az.getEligibleLoadBalancers(service) + if err != nil { + return "", err + } + + currentLBName := az.getServiceCurrentLoadBalancerName(service) + lbNamePrefix = getMostEligibleLBName(currentLBName, eligibleLBs, existingLBs) } - return nil + + if isInternal { + return fmt.Sprintf("%s%s", lbNamePrefix, consts.InternalLoadBalancerNameSuffix), nil + } + return lbNamePrefix, nil +} + +func getMostEligibleLBName( + currentLBName string, + eligibleLBs []string, + existingLBs *[]network.LoadBalancer, +) string { + // 1. If the LB is eligible and being used, choose it. + if StringInSlice(currentLBName, eligibleLBs) { + klog.V(4).Infof("getMostEligibleLBName: choose %s as it is eligible and being used", currentLBName) + return currentLBName + } + + // 2. If the LB is eligible and not created yet, choose it because it has the fewest rules. + for _, eligibleLB := range eligibleLBs { + var found bool + if existingLBs != nil { + for _, existingLB := range *existingLBs { + if strings.EqualFold(pointer.StringDeref(existingLB.Name, ""), eligibleLB) { + found = true + break + } + } + } + if !found { + klog.V(4).Infof("getMostEligibleLBName: choose %s as it is eligible and not existing", eligibleLB) + return eligibleLB + } + } + + // 3. If all eligible LBs are existing, choose the one with the fewest rules. + var expectedLBName string + ruleCount := 301 + if existingLBs != nil { + for _, existingLB := range *existingLBs { + if StringInSlice(pointer.StringDeref(existingLB.Name, ""), eligibleLBs) { + if existingLB.LoadBalancerPropertiesFormat != nil && + existingLB.LoadBalancingRules != nil { + if len(*existingLB.LoadBalancingRules) < ruleCount { + ruleCount = len(*existingLB.LoadBalancingRules) + expectedLBName = pointer.StringDeref(existingLB.Name, "") + } + } + } + } + } + + if expectedLBName != "" { + klog.V(4).Infof("getMostEligibleLBName: choose %s with fewest %d rules", expectedLBName, ruleCount) + } + + return expectedLBName +} + +func (az *Cloud) getServiceCurrentLoadBalancerName(service *v1.Service) string { + for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { + if isLoadBalancerInUseByService(service, multiSLBConfig) { + return multiSLBConfig.Name + } + } + return "" +} + +// getEligibleLoadBalancers filter out the eligible load balancers for the service. +// It follows four kinds of constraints: +// 1. 
Service annotation `service.beta.kubernetes.io/azure-load-balancer-configurations: lb1,lb2`. +// 2. AllowServicePlacement flag. Default to true, if set to false, the new services will not be put onto the LB. +// But the existing services that is using the LB will not be affected. +// 3. ServiceLabelSelector. The service will be put onto the LB only if the service has the labels specified in the selector. +// If there is no ServiceLabel selector on the LB, all services can be valid. +// 4. ServiceNamespaceSelector. The service will be put onto the LB only if the service is in the namespaces specified in the selector. +// If there is no ServiceNamespace selector on the LB, all services can be valid. +func (az *Cloud) getEligibleLoadBalancers(service *v1.Service) ([]string, error) { + var ( + eligibleLBs []MultipleStandardLoadBalancerConfiguration + eligibleLBNames []string + lbSelectedByAnnotation []string + lbFailedLabelSelector []string + lbFailedNamespaceSelector []string + lbFailedPlacementFlag []string + ) + + // 1. Service selects LBs defined in the annotation. + // If there is no annotation given, it selects all LBs. + lbsFromAnnotation := consts.GetLoadBalancerConfigurationsNames(service) + if len(lbsFromAnnotation) > 0 { + lbNamesSet := sets.New[string](lbsFromAnnotation...) + for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { + if lbNamesSet.Has(strings.ToLower(multiSLBConfig.Name)) { + klog.V(4).Infof("getEligibleLoadBalancers: service %q selects load balancer %q by annotation", service.Name, multiSLBConfig.Name) + eligibleLBs = append(eligibleLBs, multiSLBConfig) + lbSelectedByAnnotation = append(lbSelectedByAnnotation, multiSLBConfig.Name) + } + } + } else { + klog.V(4).Infof("getEligibleLoadBalancers: service %q does not select any load balancer by annotation, all load balancers are eligible", service.Name) + eligibleLBs = append(eligibleLBs, az.MultipleStandardLoadBalancerConfigurations...) + for _, eligibleLB := range eligibleLBs { + lbSelectedByAnnotation = append(lbSelectedByAnnotation, eligibleLB.Name) + } + } + + for i := len(eligibleLBs) - 1; i >= 0; i-- { + eligibleLB := eligibleLBs[i] + + // 2. If the LB does not allow service placement, it is not eligible, + // unless the service is already using the LB. + if !pointer.BoolDeref(eligibleLB.AllowServicePlacement, true) { + if isLoadBalancerInUseByService(service, eligibleLB) { + klog.V(4).Infof("getEligibleLoadBalancers: although load balancer %q has AllowServicePlacement=false, service %q is allowed to be placed on load balancer %q because it is using the load balancer", eligibleLB.Name, service.Name, eligibleLB.Name) + } else { + klog.V(4).Infof("getEligibleLoadBalancers: service %q is not allowed to be placed on load balancer %q", service.Name, eligibleLB.Name) + eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) + lbFailedPlacementFlag = append(lbFailedPlacementFlag, eligibleLB.Name) + continue + } + } + + // 3. Check the service label selector. The service can be migrated from one LB to another LB + // if the service does not match the selector of the LB that it is currently using. 
+ if eligibleLB.ServiceLabelSelector != nil { + serviceLabelSelector, err := metav1.LabelSelectorAsSelector(eligibleLB.ServiceLabelSelector) + if err != nil { + klog.Errorf("Failed to parse label selector %q for load balancer %q: %s", eligibleLB.ServiceLabelSelector.String(), eligibleLB.Name, err.Error()) + return []string{}, err + } + if !serviceLabelSelector.Matches(labels.Set(service.Labels)) { + klog.V(2).Infof("getEligibleLoadBalancers: service %q does not match label selector %q for load balancer %q", service.Name, eligibleLB.ServiceLabelSelector.String(), eligibleLB.Name) + eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) + lbFailedLabelSelector = append(lbFailedLabelSelector, eligibleLB.Name) + continue + } + } + + // 4. Check the service namespace selector. The service can be migrated from one LB to another LB + // if the service does not match the selector of the LB that it is currently using. + if eligibleLB.ServiceNamespaceSelector != nil { + serviceNamespaceSelector, err := metav1.LabelSelectorAsSelector(eligibleLB.ServiceNamespaceSelector) + if err != nil { + klog.Errorf("Failed to parse namespace selector %q for load balancer %q: %s", eligibleLB.ServiceNamespaceSelector.String(), eligibleLB.Name, err.Error()) + return []string{}, err + } + ns, err := az.KubeClient.CoreV1().Namespaces().Get(context.Background(), service.Namespace, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get namespace %q for load balancer %q: %s", service.Namespace, eligibleLB.Name, err.Error()) + return []string{}, err + } + if !serviceNamespaceSelector.Matches(labels.Set(ns.Labels)) { + klog.V(2).Infof("getEligibleLoadBalancers: namespace %q does not match namespace selector %q for load balancer %q", service.Namespace, eligibleLB.ServiceNamespaceSelector.String(), eligibleLB.Name) + eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) 
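The selector checks above rely on metav1.LabelSelectorAsSelector and labels.Set from apimachinery. A minimal usage example, with a hypothetical selector standing in for a MultipleStandardLoadBalancerConfiguration's ServiceLabelSelector:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical selector from a load balancer configuration.
	sel := &metav1.LabelSelector{MatchLabels: map[string]string{"lb": "lb-1"}}

	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}

	// Labels taken from the Service (or its Namespace for the namespace selector).
	svcLabels := map[string]string{"lb": "lb-1", "app": "web"}
	fmt.Println(s.Matches(labels.Set(svcLabels))) // true
}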
+ lbFailedNamespaceSelector = append(lbFailedNamespaceSelector, eligibleLB.Name) + continue + } + } + } + + serviceName := getServiceName(service) + if len(eligibleLBs) == 0 { + return []string{}, fmt.Errorf( + "service %q selects %d load balancers (%s), but %d of them (%s) have AllowServicePlacement set to false and the service is not using any of them, %d of them (%s) do not match the service label selector, and %d of them (%s) do not match the service namespace selector", + serviceName, + len(lbSelectedByAnnotation), + strings.Join(lbSelectedByAnnotation, ", "), + len(lbFailedPlacementFlag), + strings.Join(lbFailedPlacementFlag, ", "), + len(lbFailedLabelSelector), + strings.Join(lbFailedLabelSelector, ", "), + len(lbFailedNamespaceSelector), + strings.Join(lbFailedNamespaceSelector, ", "), + ) + } + + for _, eligibleLB := range eligibleLBs { + eligibleLBNames = append(eligibleLBNames, eligibleLB.Name) + } + + return eligibleLBNames, nil +} + +func isLoadBalancerInUseByService(service *v1.Service, lbConfig MultipleStandardLoadBalancerConfiguration) bool { + serviceName := getServiceName(service) + if lbConfig.ActiveServices != nil { + return lbConfig.ActiveServices.Has(serviceName) + } + return false } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go index 650e102b5..00fb886c5 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go @@ -33,6 +33,7 @@ import ( "k8s.io/utils/pointer" "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" ) @@ -70,11 +71,11 @@ func (bc *backendPoolTypeNodeIPConfig) EnsureHostsInPool(service *v1.Service, no } func isLBBackendPoolsExisting(lbBackendPoolNames map[bool]string, bpName *string) (found, isIPv6 bool) { - if strings.EqualFold(pointer.StringDeref(bpName, ""), lbBackendPoolNames[false]) { + if strings.EqualFold(pointer.StringDeref(bpName, ""), lbBackendPoolNames[consts.IPVersionIPv4]) { isIPv6 = false found = true } - if strings.EqualFold(pointer.StringDeref(bpName, ""), lbBackendPoolNames[true]) { + if strings.EqualFold(pointer.StringDeref(bpName, ""), lbBackendPoolNames[consts.IPVersionIPv6]) { isIPv6 = true found = true } @@ -135,10 +136,10 @@ func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(sl }) } if v4Enabled { - findBackendpoolToBeDeleted(false) + findBackendpoolToBeDeleted(consts.IPVersionIPv4) } if v6Enabled { - findBackendpoolToBeDeleted(true) + findBackendpoolToBeDeleted(consts.IPVersionIPv6) } // decouple the backendPool from the node shouldRefreshLB, err := bc.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolIDsSlice, vmSetName, &backendpoolToBeDeleted, true) @@ -391,8 +392,10 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] } vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s", bi.SubscriptionID, vnetResourceGroup, bi.VnetName) - changed := false - numOfAdd := 0 + var ( + changed bool + numOfAdd, numOfDelete int + ) lbBackendPoolName := getBackendPoolName(clusterName, isIPv6) if strings.EqualFold(pointer.StringDeref(backendPool.Name, ""), lbBackendPoolName) && backendPool.BackendAddressPoolPropertiesFormat != nil { @@ -414,6 +417,7 @@ func (bi 
*backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] } } + nodePrivateIPsSet := sets.New[string]() for _, node := range nodes { if isControlPlaneNode(node) { klog.V(4).Infof("bi.EnsureHostsInPool: skipping control plane node %s", node.Name) @@ -421,6 +425,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] } privateIP := getNodePrivateIPAddress(node, isIPv6) + nodePrivateIPsSet.Insert(privateIP) if !existingIPs.Has(privateIP) { name := node.Name if utilnet.IsIPv6String(privateIP) { @@ -438,9 +443,21 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] changed = true } } + + var nodeIPsToBeDeleted []string + for _, loadBalancerBackendAddress := range *backendPool.LoadBalancerBackendAddresses { + ip := pointer.StringDeref(loadBalancerBackendAddress.IPAddress, "") + if !nodePrivateIPsSet.Has(ip) { + klog.V(4).Infof("bi.EnsureHostsInPool: removing IP %s", ip) + nodeIPsToBeDeleted = append(nodeIPsToBeDeleted, ip) + changed = true + numOfDelete++ + } + } + removeNodeIPAddressesFromBackendPool(backendPool, nodeIPsToBeDeleted, false) } if changed { - klog.V(2).Infof("bi.EnsureHostsInPool: updating backend pool %s of load balancer %s to add %d nodes", lbBackendPoolName, lbName, numOfAdd) + klog.V(2).Infof("bi.EnsureHostsInPool: updating backend pool %s of load balancer %s to add %d nodes and remove %d nodes", lbBackendPoolName, lbName, numOfAdd, numOfDelete) if err := bi.CreateOrUpdateLBBackendPool(lbName, backendPool); err != nil { return fmt.Errorf("bi.EnsureHostsInPool: failed to update backend pool %s: %w", lbBackendPoolName, err) } @@ -723,6 +740,7 @@ func newBackendPool(lb *network.LoadBalancer, isBackendPoolPreConfigured bool, p BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{}, }) + // Always returns false return isBackendPoolPreConfigured } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go new file mode 100644 index 000000000..b00cee3c6 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go @@ -0,0 +1,389 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/retry" +) + +// DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry +func (az *Cloud) DeleteLB(service *v1.Service, lbName string) *retry.Error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rgName := az.getLoadBalancerResourceGroup() + rerr := az.LoadBalancerClient.Delete(ctx, rgName, lbName) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.lbCache.Delete(lbName) + return nil + } + + klog.Errorf("LoadBalancerClient.Delete(%s) failed: %s", lbName, rerr.Error().Error()) + az.Event(service, v1.EventTypeWarning, "DeleteLoadBalancer", rerr.Error().Error()) + return rerr +} + +// ListLB invokes az.LoadBalancerClient.List with exponential backoff retry +func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + + rgName := az.getLoadBalancerResourceGroup() + allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName) + if rerr != nil { + if rerr.IsNotFound() { + return nil, nil + } + az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error()) + klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr) + return nil, rerr.Error() + } + klog.V(2).Infof("LoadBalancerClient.List(%v) success", rgName) + return allLBs, nil +} + +// ListManagedLBs invokes az.LoadBalancerClient.List and filter out +// those that are not managed by cloud provider azure or not associated to a managed VMSet. 
+func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterName string) ([]network.LoadBalancer, error) { + allLBs, err := az.ListLB(service) + if err != nil { + return nil, err + } + + if allLBs == nil { + klog.Warningf("ListManagedLBs: no LBs found") + return nil, nil + } + + managedLBNames := sets.New[string](strings.ToLower(clusterName)) + managedLBs := make([]network.LoadBalancer, 0) + if strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuBasic) { + // return early if wantLb=false + if nodes == nil { + klog.V(4).Infof("ListManagedLBs: return all LBs in the resource group %s, including unmanaged LBs", az.getLoadBalancerResourceGroup()) + return allLBs, nil + } + + agentPoolVMSetNamesMap := make(map[string]bool) + agentPoolVMSetNames, err := az.VMSet.GetAgentPoolVMSetNames(nodes) + if err != nil { + return nil, fmt.Errorf("ListManagedLBs: failed to get agent pool vmSet names: %w", err) + } + + if agentPoolVMSetNames != nil && len(*agentPoolVMSetNames) > 0 { + for _, vmSetName := range *agentPoolVMSetNames { + klog.V(6).Infof("ListManagedLBs: found agent pool vmSet name %s", vmSetName) + agentPoolVMSetNamesMap[strings.ToLower(vmSetName)] = true + } + } + + for agentPoolVMSetName := range agentPoolVMSetNamesMap { + managedLBNames.Insert(az.mapVMSetNameToLoadBalancerName(agentPoolVMSetName, clusterName)) + } + } + + if az.useMultipleStandardLoadBalancers() { + for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { + managedLBNames.Insert(multiSLBConfig.Name, fmt.Sprintf("%s%s", multiSLBConfig.Name, consts.InternalLoadBalancerNameSuffix)) + } + } + + for _, lb := range allLBs { + if managedLBNames.Has(strings.ToLower(strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix))) { + managedLBs = append(managedLBs, lb) + klog.V(4).Infof("ListManagedLBs: found managed LB %s", pointer.StringDeref(lb.Name, "")) + } + } + + return managedLBs, nil +} + +// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry +func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + lb = cleanupSubnetInFrontendIPConfigurations(&lb) + + rgName := az.getLoadBalancerResourceGroup() + rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, ""), lb, pointer.StringDeref(lb.Etag, "")) + klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.lbCache.Delete(*lb.Name) + return nil + } + + lbJSON, _ := json.Marshal(lb) + klog.Warningf("LoadBalancerClient.CreateOrUpdate(%s) failed: %v, LoadBalancer request: %s", pointer.StringDeref(lb.Name, ""), rerr.Error(), string(lbJSON)) + + // Invalidate the cache because ETAG precondition mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", pointer.StringDeref(lb.Name, "")) + _ = az.lbCache.Delete(*lb.Name) + } + + retryErrorMessage := rerr.Error().Error() + // Invalidate the cache because another new operation has canceled the current request. 
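ListManagedLBs above decides whether an LB is managed by trimming the internal suffix from its name and doing a case-insensitive lookup in a set of expected names. A compact sketch of that matching, assuming "-internal" stands in for consts.InternalLoadBalancerNameSuffix:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

const internalSuffix = "-internal" // stand-in for consts.InternalLoadBalancerNameSuffix

// isManagedLB mirrors the matching in ListManagedLBs: trim the internal
// suffix, lowercase, and look the result up in the set of managed names.
func isManagedLB(lbName string, managed sets.Set[string]) bool {
	return managed.Has(strings.ToLower(strings.TrimSuffix(lbName, internalSuffix)))
}

func main() {
	managed := sets.New[string]("kubernetes", "lb-1")
	fmt.Println(isManagedLB("kubernetes-internal", managed)) // true
	fmt.Println(isManagedLB("unmanaged-lb", managed))        // false
}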
+ if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", pointer.StringDeref(lb.Name, "")) + _ = az.lbCache.Delete(*lb.Name) + } + + // The LB update may fail because the referenced PIP is not in the Succeeded provisioning state + if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(consts.ReferencedResourceNotProvisionedMessageCode)) { + matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage) + if len(matches) != 3 { + klog.Errorf("Failed to parse the retry error message %s", retryErrorMessage) + return rerr.Error() + } + pipRG, pipName := matches[1], matches[2] + klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, pointer.StringDeref(lb.Name, "")) + pip, _, err := az.getPublicIPAddress(pipRG, pipName, azcache.CacheReadTypeDefault) + if err != nil { + klog.Errorf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err) + return rerr.Error() + } + // Perform a dummy update to fix the provisioning state + err = az.CreateOrUpdatePIP(service, pipRG, pip) + if err != nil { + klog.Errorf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err) + return rerr.Error() + } + // Invalidate the LB cache, return the error, and the controller manager + // would retry the LB update in the next reconcile loop + _ = az.lbCache.Delete(*lb.Name) + } + + return rerr.Error() +} + +func (az *Cloud) CreateOrUpdateLBBackendPool(lbName string, backendPool network.BackendAddressPool) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + klog.V(4).Infof("CreateOrUpdateLBBackendPool: updating backend pool %s in LB %s", pointer.StringDeref(backendPool.Name, ""), lbName) + rerr := az.LoadBalancerClient.CreateOrUpdateBackendPools(ctx, az.getLoadBalancerResourceGroup(), lbName, pointer.StringDeref(backendPool.Name, ""), backendPool, pointer.StringDeref(backendPool.Etag, "")) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.lbCache.Delete(lbName) + return nil + } + + // Invalidate the cache because ETAG precondition mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) + _ = az.lbCache.Delete(lbName) + } + + retryErrorMessage := rerr.Error().Error() + // Invalidate the cache because another new operation has canceled the current request. + if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) + _ = az.lbCache.Delete(lbName) + } + + return rerr.Error() +} + +func (az *Cloud) DeleteLBBackendPool(lbName, backendPoolName string) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + klog.V(4).Infof("DeleteLBBackendPool: deleting backend pool %s in LB %s", backendPoolName, lbName) + rerr := az.LoadBalancerClient.DeleteLBBackendPool(ctx, az.getLoadBalancerResourceGroup(), lbName, backendPoolName) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.lbCache.Delete(lbName) + return nil + } + + // Invalidate the cache because ETAG precondition mismatch. 
+ if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) + _ = az.lbCache.Delete(lbName) + } + + retryErrorMessage := rerr.Error().Error() + // Invalidate the cache because another new operation has canceled the current request. + if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) + _ = az.lbCache.Delete(lbName) + } + + return rerr.Error() +} + +func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer { + if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil { + return *lb + } + + frontendIPConfigurations := *lb.FrontendIPConfigurations + for i := range frontendIPConfigurations { + config := frontendIPConfigurations[i] + if config.FrontendIPConfigurationPropertiesFormat != nil && + config.Subnet != nil && + config.Subnet.ID != nil { + subnet := network.Subnet{ + ID: config.Subnet.ID, + } + if config.Subnet.Name != nil { + subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name + } + config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet + frontendIPConfigurations[i] = config + continue + } + } + + lb.FrontendIPConfigurations = &frontendIPConfigurations + return *lb +} + +// MigrateToIPBasedBackendPoolAndWaitForCompletion use the migration API to migrate from +// NIC-based to IP-based LB backend pools. It also makes sure the number of IP addresses +// in the backend pools is expected. +func (az *Cloud) MigrateToIPBasedBackendPoolAndWaitForCompletion( + lbName string, backendPoolNames []string, nicsCountMap map[string]int, +) error { + if rerr := az.LoadBalancerClient.MigrateToIPBasedBackendPool(context.Background(), az.ResourceGroup, lbName, backendPoolNames); rerr != nil { + backendPoolNamesStr := strings.Join(backendPoolNames, ",") + klog.Errorf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Failed to migrate to IP based backend pool for lb %s, backend pool %s: %s", lbName, backendPoolNamesStr, rerr.Error().Error()) + return rerr.Error() + } + + succeeded := make(map[string]bool) + for bpName := range nicsCountMap { + succeeded[bpName] = false + } + + err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (done bool, err error) { + for bpName, nicsCount := range nicsCountMap { + if succeeded[bpName] { + continue + } + + bp, rerr := az.LoadBalancerClient.GetLBBackendPool(context.Background(), az.ResourceGroup, lbName, bpName, "") + if rerr != nil { + klog.Errorf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Failed to get backend pool %s for lb %s: %s", bpName, lbName, rerr.Error().Error()) + return false, rerr.Error() + } + + if countIPsOnBackendPool(bp) != nicsCount { + klog.V(4).Infof("MigrateToIPBasedBackendPoolAndWaitForCompletion: Expected IPs %s, current IPs %d, will retry in 5s", nicsCount, countIPsOnBackendPool(bp)) + return false, nil + } + succeeded[bpName] = true + } + return true, nil + }) + + if err != nil { + if errors.Is(err, wait.ErrWaitTimeout) { + klog.Warningf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Timeout waiting for migration to IP based backend pool for lb %s, backend pool %s", lbName, strings.Join(backendPoolNames, ",")) + return nil + } + + klog.Errorf("MigrateToIPBasedBackendPoolAndWaitForCompletion: Failed to wait for migration to IP based backend pool for lb %s, backend pool %s: 
%s", lbName, strings.Join(backendPoolNames, ","), err.Error()) + return err + } + + return nil +} + +func (az *Cloud) newLBCache() (azcache.Resource, error) { + getter := func(key string) (interface{}, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + + lb, err := az.LoadBalancerClient.Get(ctx, az.getLoadBalancerResourceGroup(), key, "") + exists, rerr := checkResourceExistsFromError(err) + if rerr != nil { + return nil, rerr.Error() + } + + if !exists { + klog.V(2).Infof("Load balancer %q not found", key) + return nil, nil + } + + return &lb, nil + } + + if az.LoadBalancerCacheTTLInSeconds == 0 { + az.LoadBalancerCacheTTLInSeconds = loadBalancerCacheTTLDefaultInSeconds + } + return azcache.NewTimedCache(time.Duration(az.LoadBalancerCacheTTLInSeconds)*time.Second, getter, az.Config.DisableAPICallCache) +} + +func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadType) (lb *network.LoadBalancer, exists bool, err error) { + cachedLB, err := az.lbCache.GetWithDeepCopy(name, crt) + if err != nil { + return lb, false, err + } + + if cachedLB == nil { + return lb, false, nil + } + + return cachedLB.(*network.LoadBalancer), true, nil +} + +// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools. +// Since both public and internal LBs are supported, lbName and lbName-internal are treated as same. +// If not same, the lbName for existingBackendPools would also be returned. +func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) { + matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID) + if len(matches) != 2 { + return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID) + } + + newLBName := matches[1] + newLBNameTrimmed := strings.TrimSuffix(newLBName, consts.InternalLoadBalancerNameSuffix) + for _, backendPool := range existingBackendPools { + matches := backendPoolIDRE.FindStringSubmatch(backendPool) + if len(matches) != 2 { + return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool) + } + + lbName := matches[1] + if !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) { + return false, lbName, nil + } + } + + return true, "", nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go index efd3772a5..fc65530ab 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice.go @@ -44,24 +44,27 @@ func (az *Cloud) reconcilePrivateLinkService( isinternal := requiresInternalLoadBalancer(service) pipRG := az.getPublicIPAddressResourceGroup(service) _, _, fipIPVersion := az.serviceOwnsFrontendIP(*fipConfig, service) + serviceName := getServiceName(service) var isIPv6 bool var err error if fipIPVersion != "" { isIPv6 = fipIPVersion == network.IPv6 } else { if isIPv6, err = az.isFIPIPv6(service, pipRG, fipConfig); err != nil { - klog.Errorf("reconcilePrivateLinkService for service(%s): failed to get FIP IP family: %v", service, err) + klog.Errorf("reconcilePrivateLinkService for service(%s): failed to get FIP IP family: %v", serviceName, err) return err } } - + createPLS := wantPLS && serviceRequiresPLS(service) + isDualStack := isServiceDualStack(service) if isIPv6 { - klog.V(2).Infof("IPv6 is not 
supported for private link service, skip reconcilePrivateLinkService for service(%s)", service) - return nil + if isDualStack || !createPLS { + klog.V(2).Infof("IPv6 is not supported for private link service, skip reconcilePrivateLinkService for service(%s)", serviceName) + return nil + } + return fmt.Errorf("IPv6 is not supported for private link service") } - createPLS := wantPLS && serviceRequiresPLS(service) - serviceName := getServiceName(service) fipConfigID := fipConfig.ID klog.V(2).Infof("reconcilePrivateLinkService for service(%s) - LB fipConfigID(%s) - wantPLS(%t) - createPLS(%t)", serviceName, pointer.StringDeref(fipConfig.Name, ""), wantPLS, createPLS) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go new file mode 100644 index 000000000..e6bebeeca --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go @@ -0,0 +1,134 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "encoding/json" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/retry" +) + +func (az *Cloud) CreateOrUpdatePLS(service *v1.Service, pls network.PrivateLinkService) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.PrivateLinkServiceClient.CreateOrUpdate(ctx, az.PrivateLinkServiceResourceGroup, pointer.StringDeref(pls.Name, ""), pls, pointer.StringDeref(pls.Etag, "")) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.plsCache.Delete(pointer.StringDeref((*pls.LoadBalancerFrontendIPConfigurations)[0].ID, "")) + return nil + } + + rtJSON, _ := json.Marshal(pls) + klog.Warningf("PrivateLinkServiceClient.CreateOrUpdate(%s) failed: %v, PrivateLinkService request: %s", pointer.StringDeref(pls.Name, ""), rerr.Error(), string(rtJSON)) + + // Invalidate the cache because etag mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("Private link service cache for %s is cleanup because of http.StatusPreconditionFailed", pointer.StringDeref(pls.Name, "")) + _ = az.plsCache.Delete(pointer.StringDeref((*pls.LoadBalancerFrontendIPConfigurations)[0].ID, "")) + } + // Invalidate the cache because another new operation has canceled the current request. 
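CreateOrUpdatePLS above keys the PLS cache on a load balancer frontend IP configuration ID, and the cache getter resolves that key by scanning each private link service's LoadBalancerFrontendIPConfigurations. A simplified, stand-alone sketch of that lookup, with a pared-down struct in place of network.PrivateLinkService:

package main

import (
	"fmt"
	"strings"
)

// plsEntry keeps only the fields needed to illustrate the cache getter's matching.
type plsEntry struct {
	Name        string
	FrontendIDs []string
}

// findPLSByFrontendID mirrors the plsCache getter: the cache key is a frontend
// IP configuration ID, matched case-insensitively against each PLS.
func findPLSByFrontendID(plsList []plsEntry, frontendID string) *plsEntry {
	for i := range plsList {
		for _, id := range plsList[i].FrontendIDs {
			if strings.EqualFold(id, frontendID) {
				return &plsList[i]
			}
		}
	}
	return nil
}

func main() {
	list := []plsEntry{{Name: "pls-1", FrontendIDs: []string{"/subscriptions/sub/rg/fip-1"}}}
	fmt.Println(findPLSByFrontendID(list, "/subscriptions/sub/rg/FIP-1").Name) // pls-1
}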
+ if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("Private link service for %s is cleanup because CreateOrUpdatePrivateLinkService is canceled by another operation", pointer.StringDeref(pls.Name, "")) + _ = az.plsCache.Delete(pointer.StringDeref((*pls.LoadBalancerFrontendIPConfigurations)[0].ID, "")) + } + klog.Errorf("PrivateLinkServiceClient.CreateOrUpdate(%s) failed: %v", pointer.StringDeref(pls.Name, ""), rerr.Error()) + return rerr.Error() +} + +// DeletePLS invokes az.PrivateLinkServiceClient.Delete with exponential backoff retry +func (az *Cloud) DeletePLS(service *v1.Service, plsName string, plsLBFrontendID string) *retry.Error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.PrivateLinkServiceClient.Delete(ctx, az.PrivateLinkServiceResourceGroup, plsName) + if rerr == nil { + // Invalidate the cache right after deleting + _ = az.plsCache.Delete(plsLBFrontendID) + return nil + } + + klog.Errorf("PrivateLinkServiceClient.DeletePLS(%s) failed: %s", plsName, rerr.Error().Error()) + az.Event(service, v1.EventTypeWarning, "DeletePrivateLinkService", rerr.Error().Error()) + return rerr +} + +// DeletePEConn invokes az.PrivateLinkServiceClient.DeletePEConnection with exponential backoff retry +func (az *Cloud) DeletePEConn(service *v1.Service, plsName string, peConnName string) *retry.Error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.PrivateLinkServiceClient.DeletePEConnection(ctx, az.PrivateLinkServiceResourceGroup, plsName, peConnName) + if rerr == nil { + return nil + } + + klog.Errorf("PrivateLinkServiceClient.DeletePEConnection(%s-%s) failed: %s", plsName, peConnName, rerr.Error().Error()) + az.Event(service, v1.EventTypeWarning, "DeletePrivateEndpointConnection", rerr.Error().Error()) + return rerr +} + +func (az *Cloud) newPLSCache() (azcache.Resource, error) { + // for PLS cache, key is LBFrontendIPConfiguration ID + getter := func(key string) (interface{}, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + plsList, err := az.PrivateLinkServiceClient.List(ctx, az.PrivateLinkServiceResourceGroup) + exists, rerr := checkResourceExistsFromError(err) + if rerr != nil { + return nil, rerr.Error() + } + + if exists { + for i := range plsList { + pls := plsList[i] + if pls.PrivateLinkServiceProperties == nil { + continue + } + fipConfigs := pls.PrivateLinkServiceProperties.LoadBalancerFrontendIPConfigurations + if fipConfigs == nil { + continue + } + for _, fipConfig := range *fipConfigs { + if strings.EqualFold(*fipConfig.ID, key) { + return &pls, nil + } + } + + } + } + + klog.V(2).Infof("No privateLinkService found for frontendIPConfig %q", key) + plsNotExistID := consts.PrivateLinkServiceNotExistID + return &network.PrivateLinkService{ID: &plsNotExistID}, nil + } + + if az.PlsCacheTTLInSeconds == 0 { + az.PlsCacheTTLInSeconds = plsCacheTTLDefaultInSeconds + } + return azcache.NewTimedCache(time.Duration(az.PlsCacheTTLInSeconds)*time.Second, getter, az.Config.DisableAPICallCache) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_publicip_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_publicip_repo.go new file mode 100644 index 000000000..536d48abd --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_publicip_repo.go @@ -0,0 +1,154 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "encoding/json" + "net/http" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy" +) + +// CreateOrUpdatePIP invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry +func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, pointer.StringDeref(pip.Name, ""), pip) + klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, pointer.StringDeref(pip.Name, "")) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.pipCache.Delete(pipResourceGroup) + return nil + } + + pipJSON, _ := json.Marshal(pip) + klog.Warningf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s, PublicIP request: %s", pipResourceGroup, pointer.StringDeref(pip.Name, ""), rerr.Error().Error(), string(pipJSON)) + az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error()) + + // Invalidate the cache because ETAG precondition mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("PublicIP cache for (%s, %s) is cleanup because of http.StatusPreconditionFailed", pipResourceGroup, pointer.StringDeref(pip.Name, "")) + _ = az.pipCache.Delete(pipResourceGroup) + } + + retryErrorMessage := rerr.Error().Error() + // Invalidate the cache because another new operation has canceled the current request. + if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("PublicIP cache for (%s, %s) is cleanup because CreateOrUpdate is canceled by another operation", pipResourceGroup, pointer.StringDeref(pip.Name, "")) + _ = az.pipCache.Delete(pipResourceGroup) + } + + return rerr.Error() +} + +// DeletePublicIP invokes az.PublicIPAddressesClient.Delete with exponential backoff retry +func (az *Cloud) DeletePublicIP(service *v1.Service, pipResourceGroup string, pipName string) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName) + if rerr != nil { + klog.Errorf("PublicIPAddressesClient.Delete(%s) failed: %s", pipName, rerr.Error().Error()) + az.Event(service, v1.EventTypeWarning, "DeletePublicIPAddress", rerr.Error().Error()) + + if strings.Contains(rerr.Error().Error(), consts.CannotDeletePublicIPErrorMessageCode) { + klog.Warningf("DeletePublicIP for public IP %s failed with error %v, this is because other resources are referencing the public IP. 
The deletion of the service will continue.", pipName, rerr.Error()) + return nil + } + return rerr.Error() + } + + // Invalidate the cache right after deleting + _ = az.pipCache.Delete(pipResourceGroup) + return nil +} + +func (az *Cloud) newPIPCache() (azcache.Resource, error) { + getter := func(key string) (interface{}, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + + pipResourceGroup := key + pipList, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup) + if rerr != nil { + return nil, rerr.Error() + } + + pipMap := &sync.Map{} + for _, pip := range pipList { + pip := pip + pipMap.Store(pointer.StringDeref(pip.Name, ""), &pip) + } + return pipMap, nil + } + + if az.PublicIPCacheTTLInSeconds == 0 { + az.PublicIPCacheTTLInSeconds = publicIPCacheTTLDefaultInSeconds + } + return azcache.NewTimedCache(time.Duration(az.PublicIPCacheTTLInSeconds)*time.Second, getter, az.Config.DisableAPICallCache) +} + +func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string, crt azcache.AzureCacheReadType) (network.PublicIPAddress, bool, error) { + cached, err := az.pipCache.Get(pipResourceGroup, crt) + if err != nil { + return network.PublicIPAddress{}, false, err + } + + pips := cached.(*sync.Map) + pip, ok := pips.Load(pipName) + if !ok { + // pip not found, refresh cache and retry + cached, err = az.pipCache.Get(pipResourceGroup, azcache.CacheReadTypeForceRefresh) + if err != nil { + return network.PublicIPAddress{}, false, err + } + pips = cached.(*sync.Map) + pip, ok = pips.Load(pipName) + if !ok { + return network.PublicIPAddress{}, false, nil + } + } + + pip = pip.(*network.PublicIPAddress) + return *(deepcopy.Copy(pip).(*network.PublicIPAddress)), true, nil +} + +func (az *Cloud) listPIP(pipResourceGroup string, crt azcache.AzureCacheReadType) ([]network.PublicIPAddress, error) { + cached, err := az.pipCache.Get(pipResourceGroup, crt) + if err != nil { + return nil, err + } + pips := cached.(*sync.Map) + var ret []network.PublicIPAddress + pips.Range(func(key, value interface{}) bool { + pip := value.(*network.PublicIPAddress) + ret = append(ret, *pip) + return true + }) + return ret, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routetable_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routetable_repo.go new file mode 100644 index 000000000..9f1110132 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routetable_repo.go @@ -0,0 +1,84 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "encoding/json" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" +) + +// CreateOrUpdateRouteTable invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry +func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, pointer.StringDeref(routeTable.Etag, "")) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.rtCache.Delete(*routeTable.Name) + return nil + } + + rtJSON, _ := json.Marshal(routeTable) + klog.Warningf("RouteTablesClient.CreateOrUpdate(%s) failed: %v, RouteTable request: %s", pointer.StringDeref(routeTable.Name, ""), rerr.Error(), string(rtJSON)) + + // Invalidate the cache because etag mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("Route table cache for %s is cleanup because of http.StatusPreconditionFailed", *routeTable.Name) + _ = az.rtCache.Delete(*routeTable.Name) + } + // Invalidate the cache because another new operation has canceled the current request. + if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name) + _ = az.rtCache.Delete(*routeTable.Name) + } + klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error()) + return rerr.Error() +} + +func (az *Cloud) newRouteTableCache() (azcache.Resource, error) { + getter := func(key string) (interface{}, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + rt, err := az.RouteTablesClient.Get(ctx, az.RouteTableResourceGroup, key, "") + exists, rerr := checkResourceExistsFromError(err) + if rerr != nil { + return nil, rerr.Error() + } + + if !exists { + klog.V(2).Infof("Route table %q not found", key) + return nil, nil + } + + return &rt, nil + } + + if az.RouteTableCacheTTLInSeconds == 0 { + az.RouteTableCacheTTLInSeconds = routeTableCacheTTLDefaultInSeconds + } + return azcache.NewTimedCache(time.Duration(az.RouteTableCacheTTLInSeconds)*time.Second, getter, az.Config.DisableAPICallCache) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_securitygroup_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_securitygroup_repo.go new file mode 100644 index 000000000..dc890c7d1 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_securitygroup_repo.go @@ -0,0 +1,113 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" +) + +// CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry +func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, pointer.StringDeref(sg.Etag, "")) + klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.nsgCache.Delete(*sg.Name) + return nil + } + + nsgJSON, _ := json.Marshal(sg) + klog.Warningf("CreateOrUpdateSecurityGroup(%s) failed: %v, NSG request: %s", pointer.StringDeref(sg.Name, ""), rerr.Error(), string(nsgJSON)) + + // Invalidate the cache because ETAG precondition mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("SecurityGroup cache for %s is cleanup because of http.StatusPreconditionFailed", *sg.Name) + _ = az.nsgCache.Delete(*sg.Name) + } + + // Invalidate the cache because another new operation has canceled the current request. + if strings.Contains(strings.ToLower(rerr.Error().Error()), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("SecurityGroup cache for %s is cleanup because CreateOrUpdateSecurityGroup is canceled by another operation", *sg.Name) + _ = az.nsgCache.Delete(*sg.Name) + } + + return rerr.Error() +} + +func (az *Cloud) newNSGCache() (azcache.Resource, error) { + getter := func(key string) (interface{}, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + nsg, err := az.SecurityGroupsClient.Get(ctx, az.SecurityGroupResourceGroup, key, "") + exists, rerr := checkResourceExistsFromError(err) + if rerr != nil { + return nil, rerr.Error() + } + + if !exists { + klog.V(2).Infof("Security group %q not found", key) + return nil, nil + } + + return &nsg, nil + } + + if az.NsgCacheTTLInSeconds == 0 { + az.NsgCacheTTLInSeconds = nsgCacheTTLDefaultInSeconds + } + return azcache.NewTimedCache(time.Duration(az.NsgCacheTTLInSeconds)*time.Second, getter, az.Config.DisableAPICallCache) +} + +func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.SecurityGroup, error) { + nsg := network.SecurityGroup{} + if az.SecurityGroupName == "" { + return nsg, fmt.Errorf("securityGroupName is not configured") + } + + securityGroup, err := az.nsgCache.GetWithDeepCopy(az.SecurityGroupName, crt) + if err != nil { + return nsg, err + } + + if securityGroup == nil { + return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName) + } + + return *(securityGroup.(*network.SecurityGroup)), nil +} + +func (az *Cloud) getPrivateLinkService(frontendIPConfigID *string, crt azcache.AzureCacheReadType) (pls network.PrivateLinkService, err error) { + cachedPLS, err := az.plsCache.GetWithDeepCopy(*frontendIPConfigID, crt) + if err != nil { + return pls, err + } + return *(cachedPLS.(*network.PrivateLinkService)), nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go index 33b3b4cb3..665a35fb0 100644 --- 
a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go @@ -104,8 +104,8 @@ func (az *Cloud) getBackendPoolIDWithRG(lbName, rgName, backendPoolName string) func (az *Cloud) getBackendPoolIDs(clusterName, lbName string) map[bool]string { return map[bool]string{ - false: az.getBackendPoolID(lbName, getBackendPoolName(clusterName, false)), - true: az.getBackendPoolID(lbName, getBackendPoolName(clusterName, true)), + consts.IPVersionIPv4: az.getBackendPoolID(lbName, getBackendPoolName(clusterName, consts.IPVersionIPv4)), + consts.IPVersionIPv6: az.getBackendPoolID(lbName, getBackendPoolName(clusterName, consts.IPVersionIPv6)), } } @@ -140,26 +140,11 @@ func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) ( return vmSetName } -// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress. -// Thus Azure do not allow mixed type (public and internal) load balancer. -// So we'd have a separate name for internal load balancer. -// This would be the name for Azure LoadBalancer resource. -func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string { - if az.LoadBalancerName != "" { - clusterName = az.LoadBalancerName +func (az *Cloud) mapVMSetNameToLoadBalancerName(vmSetName, clusterName string) string { + if vmSetName == az.VMSet.GetPrimaryVMSetName() { + return clusterName } - lbNamePrefix := vmSetName - // The LB name prefix is set to the name of the cluster when: - // 1. the LB belongs to the primary agent pool. - // 2. using the single SLB. - if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() { - lbNamePrefix = clusterName - } - - if isInternal { - return fmt.Sprintf("%s%s", lbNamePrefix, consts.InternalLoadBalancerNameSuffix) - } - return lbNamePrefix + return vmSetName } // isControlPlaneNode returns true if the node has a control-plane role label. @@ -297,8 +282,8 @@ func getBackendPoolName(clusterName string, isIPv6 bool) string { // getBackendPoolNames returns the IPv4 and IPv6 backend pool names. 
func getBackendPoolNames(clusterName string) map[bool]string { return map[bool]string{ - false: getBackendPoolName(clusterName, false), - true: getBackendPoolName(clusterName, true), + consts.IPVersionIPv4: getBackendPoolName(clusterName, consts.IPVersionIPv4), + consts.IPVersionIPv6: getBackendPoolName(clusterName, consts.IPVersionIPv6), } } @@ -335,15 +320,17 @@ func (az *Cloud) getloadbalancerHAmodeRuleName(service *v1.Service, isIPv6 bool) } func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string, isIPv6 bool) string { + isDualStack := isServiceDualStack(service) safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1) safePrefix = strings.Replace(safePrefix, ":", ".", -1) // Consider IPv6 address + var name string if useSharedSecurityRule(service) { - return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix) + name = fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix) + } else { + rulePrefix := az.getRulePrefix(service) + name = fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix) } - rulePrefix := az.getRulePrefix(service) - name := fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix) - // TODO: Use getResourceByIPFamily - return name + return getResourceByIPFamily(name, isDualStack, isIPv6) } // This returns a human-readable version of the Service used to tag some resources. @@ -447,8 +434,8 @@ func (az *Cloud) getFrontendIPConfigNames(service *v1.Service) map[bool]string { isDualStack := isServiceDualStack(service) defaultLBFrontendIPConfigName := az.getDefaultFrontendIPConfigName(service) return map[bool]string{ - false: getResourceByIPFamily(defaultLBFrontendIPConfigName, isDualStack, false), - true: getResourceByIPFamily(defaultLBFrontendIPConfigName, isDualStack, true), + consts.IPVersionIPv4: getResourceByIPFamily(defaultLBFrontendIPConfigName, isDualStack, consts.IPVersionIPv4), + consts.IPVersionIPv6: getResourceByIPFamily(defaultLBFrontendIPConfigName, isDualStack, consts.IPVersionIPv6), } } @@ -507,7 +494,7 @@ func MakeCRC32(str string) string { type availabilitySet struct { *Cloud - vmasCache *azcache.TimedCache + vmasCache azcache.Resource } type AvailabilitySetEntry struct { @@ -515,7 +502,7 @@ type AvailabilitySetEntry struct { ResourceGroup string } -func (as *availabilitySet) newVMASCache() (*azcache.TimedCache, error) { +func (as *availabilitySet) newVMASCache() (azcache.Resource, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} @@ -551,7 +538,7 @@ func (as *availabilitySet) newVMASCache() (*azcache.TimedCache, error) { as.Config.AvailabilitySetsCacheTTLInSeconds = consts.VMASCacheTTLDefaultInSeconds } - return azcache.NewTimedcache(time.Duration(as.Config.AvailabilitySetsCacheTTLInSeconds)*time.Second, getter) + return azcache.NewTimedCache(time.Duration(as.Config.AvailabilitySetsCacheTTLInSeconds)*time.Second, getter, as.Cloud.Config.DisableAPICallCache) } // newStandardSet creates a new availabilitySet. 
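Note on the azure_standard.go hunk above: backend pool IDs, frontend IP config names, and security rule names are now keyed by consts.IPVersionIPv4/IPVersionIPv6, and getSecurityRuleName derives a per-IP-family name through getResourceByIPFamily for dual-stack Services. The snippet below is a minimal, self-contained sketch of that naming pattern; the helper name resourceByIPFamily and the literal "-IPv6" suffix are illustrative assumptions, not the vendored implementation.

package main

import "fmt"

const (
	ipVersionIPv4 bool = false // mirrors consts.IPVersionIPv4 used in the diff
	ipVersionIPv6 bool = true  // mirrors consts.IPVersionIPv6 used in the diff
)

// resourceByIPFamily sketches the assumed dual-stack naming rule: the IPv4
// resource keeps the base name, while the IPv6 counterpart gets a suffix so
// both can coexist on the same load balancer or NSG.
func resourceByIPFamily(name string, isDualStack, isIPv6 bool) string {
	if isDualStack && isIPv6 {
		return name + "-IPv6" // assumed suffix, for illustration only
	}
	return name
}

func main() {
	base := "a1b2c3-TCP-80-10.0.0.0_8"
	names := map[bool]string{
		ipVersionIPv4: resourceByIPFamily(base, true, ipVersionIPv4),
		ipVersionIPv6: resourceByIPFamily(base, true, ipVersionIPv6),
	}
	fmt.Printf("IPv4 rule: %s\nIPv6 rule: %s\n", names[ipVersionIPv4], names[ipVersionIPv6])
}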
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index 6b4070391..684406246 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -251,11 +251,11 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou return "", "", err } index = int(n.Int64()) - klog.V(4).Infof("randomly pick one matching account, index: %d", index) + klog.V(4).Infof("randomly pick one matching account, index: %d, matching accounts: %s", index, accounts) } accountName = accounts[index].Name createNewAccount = false - klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location) + klog.V(4).Infof("found a matching account %s type %s location %s", accounts[index].Name, accounts[index].StorageType, accounts[index].Location) } } @@ -742,7 +742,7 @@ func isPrivateEndpointAsExpected(account storage.Account, accountOptions *Accoun } func isAllowBlobPublicAccessEqual(account storage.Account, accountOptions *AccountOptions) bool { - return pointer.BoolDeref(accountOptions.AllowBlobPublicAccess, false) == pointer.BoolDeref(account.AllowBlobPublicAccess, false) + return pointer.BoolDeref(accountOptions.AllowBlobPublicAccess, true) == pointer.BoolDeref(account.AllowBlobPublicAccess, true) } func isRequireInfrastructureEncryptionEqual(account storage.Account, accountOptions *AccountOptions) bool { @@ -754,7 +754,7 @@ func isRequireInfrastructureEncryptionEqual(account storage.Account, accountOpti } func isAllowSharedKeyAccessEqual(account storage.Account, accountOptions *AccountOptions) bool { - return pointer.BoolDeref(accountOptions.AllowSharedKeyAccess, false) == pointer.BoolDeref(account.AllowSharedKeyAccess, false) + return pointer.BoolDeref(accountOptions.AllowSharedKeyAccess, true) == pointer.BoolDeref(account.AllowSharedKeyAccess, true) } func isAccessTierEqual(account storage.Account, accountOptions *AccountOptions) bool { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_subnet_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_subnet_repo.go new file mode 100644 index 000000000..9d5cec09e --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_subnet_repo.go @@ -0,0 +1,68 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +// CreateOrUpdateSubnet invokes az.SubnetClient.CreateOrUpdate with exponential backoff retry +func (az *Cloud) CreateOrUpdateSubnet(service *v1.Service, subnet network.Subnet) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + var rg string + if len(az.VnetResourceGroup) > 0 { + rg = az.VnetResourceGroup + } else { + rg = az.ResourceGroup + } + + rerr := az.SubnetsClient.CreateOrUpdate(ctx, rg, az.VnetName, *subnet.Name, subnet) + klog.V(10).Infof("SubnetClient.CreateOrUpdate(%s): end", *subnet.Name) + if rerr != nil { + klog.Errorf("SubnetClient.CreateOrUpdate(%s) failed: %s", *subnet.Name, rerr.Error().Error()) + az.Event(service, v1.EventTypeWarning, "CreateOrUpdateSubnet", rerr.Error().Error()) + return rerr.Error() + } + + return nil +} + +func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (network.Subnet, bool, error) { + var rg string + if len(az.VnetResourceGroup) > 0 { + rg = az.VnetResourceGroup + } else { + rg = az.ResourceGroup + } + + ctx, cancel := getContextWithCancel() + defer cancel() + subnet, err := az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "") + exists, rerr := checkResourceExistsFromError(err) + if rerr != nil { + return subnet, false, rerr.Error() + } + + if !exists { + klog.V(2).Infof("Subnet %q not found", subnetName) + } + return subnet, exists, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go index a9c13f143..610cd83b4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go @@ -34,6 +34,11 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/consts" ) +const ( + IPVersionIPv6 bool = true + IPVersionIPv4 bool = false +) + var strToExtendedLocationType = map[string]network.ExtendedLocationTypes{ "edgezone": network.EdgeZone, } @@ -281,17 +286,17 @@ func getVMSSVMCacheKey(resourceGroup, vmssName string) string { } // isNodeInVMSSVMCache check whether nodeName is in vmssVMCache -func isNodeInVMSSVMCache(nodeName string, vmssVMCache *azcache.TimedCache) bool { +func isNodeInVMSSVMCache(nodeName string, vmssVMCache azcache.Resource) bool { if vmssVMCache == nil { return false } var isInCache bool - vmssVMCache.Lock.Lock() - defer vmssVMCache.Lock.Unlock() + vmssVMCache.Lock() + defer vmssVMCache.Unlock() - for _, entry := range vmssVMCache.Store.List() { + for _, entry := range vmssVMCache.GetStore().List() { if entry != nil { e := entry.(*azcache.AzureCacheEntry) e.Lock.Lock() @@ -392,10 +397,20 @@ func getServiceLoadBalancerIPs(service *v1.Service) []string { // setServiceLoadBalancerIP sets LB IP to a Service func setServiceLoadBalancerIP(service *v1.Service, ip string) { + if service == nil { + klog.Warning("setServiceLoadBalancerIP: Service is nil") + return + } + parsedIP := net.ParseIP(ip) + if parsedIP == nil { + klog.Warningf("setServiceLoadBalancerIP: IP %q is not valid for Service %q", ip, service.Name) + return + } + + isIPv6 := parsedIP.To4() == nil if service.Annotations == nil { service.Annotations = map[string]string{} } - isIPv6 := net.ParseIP(ip).To4() == nil service.Annotations[consts.ServiceAnnotationLoadBalancerIPDualStack[isIPv6]] = ip } @@ -505,3 +520,22 @@ func countIPsOnBackendPool(backendPool
network.BackendAddressPool) int { return ipsCount } + +// StringInSlice check if string in a list +func StringInSlice(s string, list []string) bool { + for _, item := range list { + if item == s { + return true + } + } + return false +} + +// stringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. +func stringSlice(s *[]string) []string { + if s != nil { + return *s + } + return nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets_repo.go new file mode 100644 index 000000000..a672286bd --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets_repo.go @@ -0,0 +1,193 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/consts" +) + +// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry +func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt azcache.AzureCacheReadType) (compute.VirtualMachine, error) { + var machine compute.VirtualMachine + var retryErr error + err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { + machine, retryErr = az.getVirtualMachine(name, crt) + if errors.Is(retryErr, cloudprovider.InstanceNotFound) { + return true, cloudprovider.InstanceNotFound + } + if retryErr != nil { + klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr) + return false, nil + } + klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name) + return true, nil + }) + if errors.Is(err, wait.ErrWaitTimeout) { + err = retryErr + } + return machine, err +} + +// ListVirtualMachines invokes az.VirtualMachinesClient.List with exponential backoff retry +func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMachine, error) { + ctx, cancel := getContextWithCancel() + defer cancel() + + allNodes, rerr := az.VirtualMachinesClient.List(ctx, resourceGroup) + if rerr != nil { + klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, rerr) + return nil, rerr.Error() + } + klog.V(6).Infof("VirtualMachinesClient.List(%v) success", resourceGroup) + return allNodes, nil +} + +// getPrivateIPsForMachine is wrapper for optional backoff getting private ips +// list of a node by name +func (az *Cloud) getPrivateIPsForMachine(nodeName types.NodeName) ([]string, error) { + return az.getPrivateIPsForMachineWithRetry(nodeName) +} + 
+func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]string, error) { + var privateIPs []string + err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { + var retryErr error + privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName)) + if retryErr != nil { + // won't retry since the instance doesn't exist on Azure. + if errors.Is(retryErr, cloudprovider.InstanceNotFound) { + return true, retryErr + } + klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry,err=%v", nodeName, retryErr) + return false, nil + } + klog.V(3).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName) + return true, nil + }) + return privateIPs, err +} + +func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) { + return az.GetIPForMachineWithRetry(nodeName) +} + +// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry +func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) { + var ip, publicIP string + err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { + var retryErr error + ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name)) + if retryErr != nil { + klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) + return false, nil + } + klog.V(3).Infof("GetIPForMachineWithRetry(%s): backoff success", name) + return true, nil + }) + return ip, publicIP, err +} + +func (az *Cloud) newVMCache() (azcache.Resource, error) { + getter := func(key string) (interface{}, error) { + // Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView + // request. If we first send an InstanceView request and then a non InstanceView request, the second + // request will still hit throttling. This is what happens now for cloud controller manager: In this + // case we do get instance view every time to fulfill the azure_zones requirement without hitting + // throttling. + // Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed + ctx, cancel := getContextWithCancel() + defer cancel() + + resourceGroup, err := az.GetNodeResourceGroup(key) + if err != nil { + return nil, err + } + + vm, verr := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceViewTypesInstanceView) + exists, rerr := checkResourceExistsFromError(verr) + if rerr != nil { + return nil, rerr.Error() + } + + if !exists { + klog.V(2).Infof("Virtual machine %q not found", key) + return nil, nil + } + + if vm.VirtualMachineProperties != nil && + strings.EqualFold(pointer.StringDeref(vm.VirtualMachineProperties.ProvisioningState, ""), string(consts.ProvisioningStateDeleting)) { + klog.V(2).Infof("Virtual machine %q is under deleting", key) + return nil, nil + } + + return &vm, nil + } + + if az.VMCacheTTLInSeconds == 0 { + az.VMCacheTTLInSeconds = vmCacheTTLDefaultInSeconds + } + return azcache.NewTimedCache(time.Duration(az.VMCacheTTLInSeconds)*time.Second, getter, az.Config.DisableAPICallCache) +} + +// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache +// The service side has throttling control that delays responses if there are multiple requests onto certain vm +// resource request in short period. 
+func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) { + vmName := string(nodeName) + cachedVM, err := az.vmCache.Get(vmName, crt) + if err != nil { + return vm, err + } + + if cachedVM == nil { + klog.Warningf("Unable to find node %s: %v", nodeName, cloudprovider.InstanceNotFound) + return vm, cloudprovider.InstanceNotFound + } + + return *(cachedVM.(*compute.VirtualMachine)), nil +} + +func (az *Cloud) getRouteTable(crt azcache.AzureCacheReadType) (routeTable network.RouteTable, exists bool, err error) { + if len(az.RouteTableName) == 0 { + return routeTable, false, fmt.Errorf("route table name is not configured") + } + + cachedRt, err := az.rtCache.GetWithDeepCopy(az.RouteTableName, crt) + if err != nil { + return routeTable, false, err + } + + if cachedRt == nil { + return routeTable, false, nil + } + + return *(cachedRt.(*network.RouteTable)), true, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go index 469210f85..f63ec4fe5 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go @@ -86,19 +86,19 @@ type ScaleSet struct { // vmssCache is timed cache where the Store in the cache is a map of // Key: consts.VMSSKey // Value: sync.Map of [vmssName]*VMSSEntry - vmssCache *azcache.TimedCache + vmssCache azcache.Resource // vmssVMCache is timed cache where the Store in the cache is a map of // Key: [resourcegroup/vmssName] // Value: sync.Map of [vmName]*VMSSVirtualMachineEntry - vmssVMCache *azcache.TimedCache + vmssVMCache azcache.Resource // nonVmssUniformNodesCache is used to store node names from non uniform vm. // Currently, the nodes can from avset or vmss flex or individual vm. // This cache contains an entry called nonVmssUniformNodesEntry. // nonVmssUniformNodesEntry contains avSetVMNodeNames list, clusterNodeNames list // and current clusterNodeNames. 
- nonVmssUniformNodesCache *azcache.TimedCache + nonVmssUniformNodesCache azcache.Resource // lockMap in cache refresh lockMap *lockMap diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go index 2ab07adff..860fead7a 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go @@ -64,7 +64,7 @@ const ( ManagedByUnknownVMSet VMManagementType = "ManagedByUnknownVMSet" ) -func (ss *ScaleSet) newVMSSCache(ctx context.Context) (*azcache.TimedCache, error) { +func (ss *ScaleSet) newVMSSCache(ctx context.Context) (azcache.Resource, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [vmssName]*vmssEntry @@ -102,14 +102,16 @@ func (ss *ScaleSet) newVMSSCache(ctx context.Context) (*azcache.TimedCache, erro } } - if resourceGroupNotFound { - // gc vmss vm cache when there is resource group not found - vmssVMKeys := ss.vmssVMCache.Store.ListKeys() - for _, cacheKey := range vmssVMKeys { - vmssName := cacheKey[strings.LastIndex(cacheKey, "/")+1:] - if _, ok := localCache.Load(vmssName); !ok { - klog.V(2).Infof("remove vmss %s from vmssVMCache due to rg not found", cacheKey) - _ = ss.vmssVMCache.Delete(cacheKey) + if !ss.Cloud.Config.DisableAPICallCache { + if resourceGroupNotFound { + // gc vmss vm cache when there is resource group not found + vmssVMKeys := ss.vmssVMCache.GetStore().ListKeys() + for _, cacheKey := range vmssVMKeys { + vmssName := cacheKey[strings.LastIndex(cacheKey, "/")+1:] + if _, ok := localCache.Load(vmssName); !ok { + klog.V(2).Infof("remove vmss %s from vmssVMCache due to rg not found", cacheKey) + _ = ss.vmssVMCache.Delete(cacheKey) + } } } } @@ -119,7 +121,7 @@ func (ss *ScaleSet) newVMSSCache(ctx context.Context) (*azcache.TimedCache, erro if ss.Config.VmssCacheTTLInSeconds == 0 { ss.Config.VmssCacheTTLInSeconds = consts.VMSSCacheTTLDefaultInSeconds } - return azcache.NewTimedcache(time.Duration(ss.Config.VmssCacheTTLInSeconds)*time.Second, getter) + return azcache.NewTimedCache(time.Duration(ss.Config.VmssCacheTTLInSeconds)*time.Second, getter, ss.Config.DisableAPICallCache) } func (ss *ScaleSet) getVMSSVMsFromCache(resourceGroup, vmssName string, crt azcache.AzureCacheReadType) (*sync.Map, error) { @@ -139,31 +141,33 @@ func (ss *ScaleSet) getVMSSVMsFromCache(resourceGroup, vmssName string, crt azca } // newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS. 
-func (ss *ScaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) { +func (ss *ScaleSet) newVMSSVirtualMachinesCache() (azcache.Resource, error) { vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second getter := func(cacheKey string) (interface{}, error) { localCache := &sync.Map{} // [nodeName]*VMSSVirtualMachineEntry oldCache := make(map[string]*VMSSVirtualMachineEntry) - entry, exists, err := ss.vmssVMCache.Store.GetByKey(cacheKey) - if err != nil { - return nil, err - } - if exists { - cached := entry.(*azcache.AzureCacheEntry).Data - if cached != nil { - virtualMachines := cached.(*sync.Map) - virtualMachines.Range(func(key, value interface{}) bool { - oldCache[key.(string)] = value.(*VMSSVirtualMachineEntry) - return true - }) + if !ss.Cloud.Config.DisableAPICallCache { + entry, exists, err := ss.vmssVMCache.GetStore().GetByKey(cacheKey) + if err != nil { + return nil, err + } + if exists { + cached := entry.(*azcache.AzureCacheEntry).Data + if cached != nil { + virtualMachines := cached.(*sync.Map) + virtualMachines.Range(func(key, value interface{}) bool { + oldCache[key.(string)] = value.(*VMSSVirtualMachineEntry) + return true + }) + } } } result := strings.Split(cacheKey, "/") if len(result) < 2 { - err = fmt.Errorf("Invalid cacheKey (%s)", cacheKey) + err := fmt.Errorf("invalid cacheKey (%s)", cacheKey) return nil, err } @@ -202,43 +206,50 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) { } localCache.Store(computerName, vmssVMCacheEntry) - delete(oldCache, computerName) + if !ss.Cloud.Config.DisableAPICallCache { + delete(oldCache, computerName) + } } - // add old missing cache data with nil entries to prevent aggressive - // ARM calls during cache invalidation - for name, vmEntry := range oldCache { - // if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache - // then it should not be added back to the cache - if vmEntry.VirtualMachine == nil && time.Since(vmEntry.LastUpdate) > vmssVirtualMachinesCacheTTL { - klog.V(5).Infof("ignoring expired entries from old cache for %s", name) - continue - } - LastUpdate := time.Now().UTC() - if vmEntry.VirtualMachine == nil { - // if this is already a nil entry then keep the time the nil - // entry was first created, so we can cleanup unwanted entries - LastUpdate = vmEntry.LastUpdate - } + if !ss.Cloud.Config.DisableAPICallCache { + // add old missing cache data with nil entries to prevent aggressive + // ARM calls during cache invalidation + for name, vmEntry := range oldCache { + // if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache + // then it should not be added back to the cache + if vmEntry.VirtualMachine == nil && time.Since(vmEntry.LastUpdate) > vmssVirtualMachinesCacheTTL { + klog.V(5).Infof("ignoring expired entries from old cache for %s", name) + continue + } + LastUpdate := time.Now().UTC() + if vmEntry.VirtualMachine == nil { + // if this is already a nil entry then keep the time the nil + // entry was first created, so we can cleanup unwanted entries + LastUpdate = vmEntry.LastUpdate + } - klog.V(5).Infof("adding old entries to new cache for %s", name) - localCache.Store(name, &VMSSVirtualMachineEntry{ - ResourceGroup: vmEntry.ResourceGroup, - VMSSName: vmEntry.VMSSName, - InstanceID: vmEntry.InstanceID, - VirtualMachine: nil, - LastUpdate: LastUpdate, - }) + klog.V(5).Infof("adding old entries to new cache for %s", name) + localCache.Store(name, &VMSSVirtualMachineEntry{ 
+ ResourceGroup: vmEntry.ResourceGroup, + VMSSName: vmEntry.VMSSName, + InstanceID: vmEntry.InstanceID, + VirtualMachine: nil, + LastUpdate: LastUpdate, + }) + } } return localCache, nil } - return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter) + return azcache.NewTimedCache(vmssVirtualMachinesCacheTTL, getter, ss.Cloud.Config.DisableAPICallCache) } // DeleteCacheForNode deletes Node from VMSS VM and VM caches. func (ss *ScaleSet) DeleteCacheForNode(nodeName string) error { + if ss.Config.DisableAPICallCache { + return nil + } vmManagementType, err := ss.getVMManagementTypeByNodeName(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("getVMManagementTypeByNodeName(%s) failed with %v", nodeName, err) @@ -314,7 +325,7 @@ func (ss *ScaleSet) updateCache(nodeName, resourceGroupName, vmssName, instanceI return nil } -func (ss *ScaleSet) newNonVmssUniformNodesCache() (*azcache.TimedCache, error) { +func (ss *ScaleSet) newNonVmssUniformNodesCache() (azcache.Resource, error) { getter := func(key string) (interface{}, error) { vmssFlexVMNodeNames := sets.New[string]() vmssFlexVMProviderIDs := sets.New[string]() @@ -368,7 +379,7 @@ func (ss *ScaleSet) newNonVmssUniformNodesCache() (*azcache.TimedCache, error) { if ss.Config.NonVmssUniformNodesCacheTTLInSeconds == 0 { ss.Config.NonVmssUniformNodesCacheTTLInSeconds = consts.NonVmssUniformNodesCacheTTLDefaultInSeconds } - return azcache.NewTimedcache(time.Duration(ss.Config.NonVmssUniformNodesCacheTTLInSeconds)*time.Second, getter) + return azcache.NewTimedCache(time.Duration(ss.Config.NonVmssUniformNodesCacheTTLInSeconds)*time.Second, getter, ss.Cloud.Config.DisableAPICallCache) } func (ss *ScaleSet) getVMManagementTypeByNodeName(nodeName string, crt azcache.AzureCacheReadType) (VMManagementType, error) { @@ -382,6 +393,16 @@ func (ss *ScaleSet) getVMManagementTypeByNodeName(nodeName string, crt azcache.A return ManagedByUnknownVMSet, err } + if ss.Cloud.Config.DisableAPICallCache { + if cached.(NonVmssUniformNodesEntry).AvSetVMNodeNames.Has(nodeName) { + return ManagedByAvSet, nil + } + if cached.(NonVmssUniformNodesEntry).VMSSFlexVMNodeNames.Has(nodeName) { + return ManagedByVmssFlex, nil + } + return ManagedByVmssUniform, nil + } + cachedNodes := cached.(NonVmssUniformNodesEntry).ClusterNodeNames // if the node is not in the cache, assume the node has joined after the last cache refresh and attempt to refresh the cache. if !cachedNodes.Has(nodeName) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go new file mode 100644 index 000000000..6721dceab --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go @@ -0,0 +1,55 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "strings" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/retry" +) + +// CreateOrUpdateVMSS invokes az.VirtualMachineScaleSetsClient.Update(). +func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { + ctx, cancel := getContextWithCancel() + defer cancel() + + // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. + // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. + klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated") + vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName) + if rerr != nil { + klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr) + return rerr + } + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName) + return nil + } + + rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters) + klog.V(10).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName) + if rerr != nil { + klog.Errorf("CreateOrUpdateVMSS: error CreateOrUpdate vmss(%s): %v", VMScaleSetName, rerr) + return rerr + } + + return nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go index 2d760239c..22889373b 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go @@ -48,11 +48,11 @@ var ( type FlexScaleSet struct { *Cloud - vmssFlexCache *azcache.TimedCache + vmssFlexCache azcache.Resource vmssFlexVMNameToVmssID *sync.Map vmssFlexVMNameToNodeName *sync.Map - vmssFlexVMCache *azcache.TimedCache + vmssFlexVMCache azcache.Resource // lockMap in cache refresh lockMap *lockMap diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go index 8db7b5f7f..ed44394f8 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex_cache.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/consts" ) -func (fs *FlexScaleSet) newVmssFlexCache(ctx context.Context) (*azcache.TimedCache, error) { +func (fs *FlexScaleSet) newVmssFlexCache(ctx context.Context) (azcache.Resource, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} @@ -73,10 +73,10 @@ func (fs *FlexScaleSet) newVmssFlexCache(ctx context.Context) (*azcache.TimedCac if fs.Config.VmssFlexCacheTTLInSeconds == 0 { fs.Config.VmssFlexCacheTTLInSeconds = consts.VmssFlexCacheTTLDefaultInSeconds } - return azcache.NewTimedcache(time.Duration(fs.Config.VmssFlexCacheTTLInSeconds)*time.Second, getter) + return azcache.NewTimedCache(time.Duration(fs.Config.VmssFlexCacheTTLInSeconds)*time.Second, getter, fs.Cloud.Config.DisableAPICallCache) } -func (fs *FlexScaleSet) newVmssFlexVMCache(ctx context.Context) 
(*azcache.TimedCache, error) { +func (fs *FlexScaleSet) newVmssFlexVMCache(ctx context.Context) (azcache.Resource, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} @@ -123,7 +123,7 @@ func (fs *FlexScaleSet) newVmssFlexVMCache(ctx context.Context) (*azcache.TimedC if fs.Config.VmssFlexVMCacheTTLInSeconds == 0 { fs.Config.VmssFlexVMCacheTTLInSeconds = consts.VmssFlexVMCacheTTLDefaultInSeconds } - return azcache.NewTimedcache(time.Duration(fs.Config.VmssFlexVMCacheTTLInSeconds)*time.Second, getter) + return azcache.NewTimedCache(time.Duration(fs.Config.VmssFlexVMCacheTTLInSeconds)*time.Second, getter, fs.Cloud.Config.DisableAPICallCache) } func (fs *FlexScaleSet) getNodeNameByVMName(vmName string) (string, error) { @@ -333,6 +333,9 @@ func (fs *FlexScaleSet) getVmssFlexByName(vmssFlexName string) (*compute.Virtual } func (fs *FlexScaleSet) DeleteCacheForNode(nodeName string) error { + if fs.Config.DisableAPICallCache { + return nil + } vmssFlexID, err := fs.getNodeVmssFlexID(nodeName) if err != nil { klog.Errorf("getNodeVmssFlexID(%s) failed with %v", nodeName, err) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go index ea6e555e3..1675aa12e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go @@ -21,21 +21,9 @@ import ( "net/http" "regexp" "strings" - "sync" - "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - - "k8s.io/apimachinery/pkg/types" - cloudprovider "k8s.io/cloud-provider" - "k8s.io/klog/v2" - "k8s.io/utils/pointer" - - azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" - "sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy" ) var ( @@ -65,323 +53,6 @@ func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) { return false, err } -// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache -// The service side has throttling control that delays responses if there are multiple requests onto certain vm -// resource request in short period. 
-func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) { - vmName := string(nodeName) - cachedVM, err := az.vmCache.Get(vmName, crt) - if err != nil { - return vm, err - } - - if cachedVM == nil { - klog.Warningf("Unable to find node %s: %v", nodeName, cloudprovider.InstanceNotFound) - return vm, cloudprovider.InstanceNotFound - } - - return *(cachedVM.(*compute.VirtualMachine)), nil -} - -func (az *Cloud) getRouteTable(crt azcache.AzureCacheReadType) (routeTable network.RouteTable, exists bool, err error) { - if len(az.RouteTableName) == 0 { - return routeTable, false, fmt.Errorf("route table name is not configured") - } - - cachedRt, err := az.rtCache.GetWithDeepCopy(az.RouteTableName, crt) - if err != nil { - return routeTable, false, err - } - - if cachedRt == nil { - return routeTable, false, nil - } - - return *(cachedRt.(*network.RouteTable)), true, nil -} - -func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string, crt azcache.AzureCacheReadType) (network.PublicIPAddress, bool, error) { - cached, err := az.pipCache.Get(pipResourceGroup, crt) - if err != nil { - return network.PublicIPAddress{}, false, err - } - - pips := cached.(*sync.Map) - pip, ok := pips.Load(pipName) - if !ok { - // pip not found, refresh cache and retry - cached, err = az.pipCache.Get(pipResourceGroup, azcache.CacheReadTypeForceRefresh) - if err != nil { - return network.PublicIPAddress{}, false, err - } - pips = cached.(*sync.Map) - pip, ok = pips.Load(pipName) - if !ok { - return network.PublicIPAddress{}, false, nil - } - } - - pip = pip.(*network.PublicIPAddress) - return *(deepcopy.Copy(pip).(*network.PublicIPAddress)), true, nil -} - -func (az *Cloud) listPIP(pipResourceGroup string, crt azcache.AzureCacheReadType) ([]network.PublicIPAddress, error) { - cached, err := az.pipCache.Get(pipResourceGroup, crt) - if err != nil { - return nil, err - } - pips := cached.(*sync.Map) - var ret []network.PublicIPAddress - pips.Range(func(key, value interface{}) bool { - pip := value.(*network.PublicIPAddress) - ret = append(ret, *pip) - return true - }) - return ret, nil -} - -func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (network.Subnet, bool, error) { - var rg string - if len(az.VnetResourceGroup) > 0 { - rg = az.VnetResourceGroup - } else { - rg = az.ResourceGroup - } - - ctx, cancel := getContextWithCancel() - defer cancel() - subnet, err := az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "") - exists, rerr := checkResourceExistsFromError(err) - if rerr != nil { - return subnet, false, rerr.Error() - } - - if !exists { - klog.V(2).Infof("Subnet %q not found", subnetName) - } - return subnet, exists, nil -} - -func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadType) (lb *network.LoadBalancer, exists bool, err error) { - cachedLB, err := az.lbCache.GetWithDeepCopy(name, crt) - if err != nil { - return lb, false, err - } - - if cachedLB == nil { - return lb, false, nil - } - - return cachedLB.(*network.LoadBalancer), true, nil -} - -func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.SecurityGroup, error) { - nsg := network.SecurityGroup{} - if az.SecurityGroupName == "" { - return nsg, fmt.Errorf("securityGroupName is not configured") - } - - securityGroup, err := az.nsgCache.GetWithDeepCopy(az.SecurityGroupName, crt) - if err != nil { - return nsg, err - } - - if securityGroup == nil { - return nsg, fmt.Errorf("nsg %q not found", 
az.SecurityGroupName) - } - - return *(securityGroup.(*network.SecurityGroup)), nil -} - -func (az *Cloud) getPrivateLinkService(frontendIPConfigID *string, crt azcache.AzureCacheReadType) (pls network.PrivateLinkService, err error) { - cachedPLS, err := az.plsCache.GetWithDeepCopy(*frontendIPConfigID, crt) - if err != nil { - return pls, err - } - return *(cachedPLS.(*network.PrivateLinkService)), nil -} - -func (az *Cloud) newVMCache() (*azcache.TimedCache, error) { - getter := func(key string) (interface{}, error) { - // Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView - // request. If we first send an InstanceView request and then a non InstanceView request, the second - // request will still hit throttling. This is what happens now for cloud controller manager: In this - // case we do get instance view every time to fulfill the azure_zones requirement without hitting - // throttling. - // Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed - ctx, cancel := getContextWithCancel() - defer cancel() - - resourceGroup, err := az.GetNodeResourceGroup(key) - if err != nil { - return nil, err - } - - vm, verr := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceViewTypesInstanceView) - exists, rerr := checkResourceExistsFromError(verr) - if rerr != nil { - return nil, rerr.Error() - } - - if !exists { - klog.V(2).Infof("Virtual machine %q not found", key) - return nil, nil - } - - if vm.VirtualMachineProperties != nil && - strings.EqualFold(pointer.StringDeref(vm.VirtualMachineProperties.ProvisioningState, ""), string(consts.ProvisioningStateDeleting)) { - klog.V(2).Infof("Virtual machine %q is under deleting", key) - return nil, nil - } - - return &vm, nil - } - - if az.VMCacheTTLInSeconds == 0 { - az.VMCacheTTLInSeconds = vmCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(az.VMCacheTTLInSeconds)*time.Second, getter) -} - -func (az *Cloud) newLBCache() (*azcache.TimedCache, error) { - getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - lb, err := az.LoadBalancerClient.Get(ctx, az.getLoadBalancerResourceGroup(), key, "") - exists, rerr := checkResourceExistsFromError(err) - if rerr != nil { - return nil, rerr.Error() - } - - if !exists { - klog.V(2).Infof("Load balancer %q not found", key) - return nil, nil - } - - return &lb, nil - } - - if az.LoadBalancerCacheTTLInSeconds == 0 { - az.LoadBalancerCacheTTLInSeconds = loadBalancerCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(az.LoadBalancerCacheTTLInSeconds)*time.Second, getter) -} - -func (az *Cloud) newNSGCache() (*azcache.TimedCache, error) { - getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - nsg, err := az.SecurityGroupsClient.Get(ctx, az.SecurityGroupResourceGroup, key, "") - exists, rerr := checkResourceExistsFromError(err) - if rerr != nil { - return nil, rerr.Error() - } - - if !exists { - klog.V(2).Infof("Security group %q not found", key) - return nil, nil - } - - return &nsg, nil - } - - if az.NsgCacheTTLInSeconds == 0 { - az.NsgCacheTTLInSeconds = nsgCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(az.NsgCacheTTLInSeconds)*time.Second, getter) -} - -func (az *Cloud) newRouteTableCache() (*azcache.TimedCache, error) { - getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer 
cancel() - rt, err := az.RouteTablesClient.Get(ctx, az.RouteTableResourceGroup, key, "") - exists, rerr := checkResourceExistsFromError(err) - if rerr != nil { - return nil, rerr.Error() - } - - if !exists { - klog.V(2).Infof("Route table %q not found", key) - return nil, nil - } - - return &rt, nil - } - - if az.RouteTableCacheTTLInSeconds == 0 { - az.RouteTableCacheTTLInSeconds = routeTableCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(az.RouteTableCacheTTLInSeconds)*time.Second, getter) -} - -func (az *Cloud) newPIPCache() (*azcache.TimedCache, error) { - getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - pipResourceGroup := key - pipList, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup) - if rerr != nil { - return nil, rerr.Error() - } - - pipMap := &sync.Map{} - for _, pip := range pipList { - pip := pip - pipMap.Store(pointer.StringDeref(pip.Name, ""), &pip) - } - return pipMap, nil - } - - if az.PublicIPCacheTTLInSeconds == 0 { - az.PublicIPCacheTTLInSeconds = publicIPCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(az.PublicIPCacheTTLInSeconds)*time.Second, getter) -} - -func (az *Cloud) newPLSCache() (*azcache.TimedCache, error) { - // for PLS cache, key is LBFrontendIPConfiguration ID - getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - plsList, err := az.PrivateLinkServiceClient.List(ctx, az.PrivateLinkServiceResourceGroup) - exists, rerr := checkResourceExistsFromError(err) - if rerr != nil { - return nil, rerr.Error() - } - - if exists { - for i := range plsList { - pls := plsList[i] - if pls.PrivateLinkServiceProperties == nil { - continue - } - fipConfigs := pls.PrivateLinkServiceProperties.LoadBalancerFrontendIPConfigurations - if fipConfigs == nil { - continue - } - for _, fipConfig := range *fipConfigs { - if strings.EqualFold(*fipConfig.ID, key) { - return &pls, nil - } - } - - } - } - - klog.V(2).Infof("No privateLinkService found for frontendIPConfig %q", key) - plsNotExistID := consts.PrivateLinkServiceNotExistID - return &network.PrivateLinkService{ID: &plsNotExistID}, nil - } - - if az.PlsCacheTTLInSeconds == 0 { - az.PlsCacheTTLInSeconds = plsCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(az.PlsCacheTTLInSeconds)*time.Second, getter) -} - func (az *Cloud) useStandardLoadBalancer() bool { return strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuStandard) } @@ -426,29 +97,3 @@ func ConvertResourceGroupNameToLower(resourceID string) (string, error) { resourceGroup := matches[1] return strings.Replace(resourceID, resourceGroup, strings.ToLower(resourceGroup), 1), nil } - -// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools. -// Since both public and internal LBs are supported, lbName and lbName-internal are treated as same. -// If not same, the lbName for existingBackendPools would also be returned. 
-func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) { - matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID) - if len(matches) != 2 { - return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID) - } - - newLBName := matches[1] - newLBNameTrimmed := strings.TrimSuffix(newLBName, consts.InternalLoadBalancerNameSuffix) - for _, backendPool := range existingBackendPools { - matches := backendPoolIDRE.FindStringSubmatch(backendPool) - if len(matches) != 2 { - return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool) - } - - lbName := matches[1] - if !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) { - return false, lbName, nil - } - } - - return true, "", nil -}
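Note on the recurring cache changes across the files above: every provider cache field moves from the concrete *azcache.TimedCache to the azcache.Resource interface, and azcache.NewTimedcache(ttl, getter) becomes azcache.NewTimedCache(ttl, getter, disableAPICallCache), threading the new DisableAPICallCache config knob through every constructor. The self-contained sketch below models that pattern with the standard library only; the timedCache type, its fields, and newTimedCache are illustrative stand-ins, not the real sigs.k8s.io/cloud-provider-azure/pkg/cache API.

package main

import (
	"fmt"
	"sync"
	"time"
)

// getterFunc mirrors the getter signature used throughout the diff:
// it fetches a resource from ARM when the cache has no fresh entry.
type getterFunc func(key string) (interface{}, error)

type entry struct {
	data      interface{}
	updatedAt time.Time
}

// timedCache is a minimal stand-in for the behaviour callers rely on:
// Get with a TTL, Delete for invalidation, and a flag that bypasses
// caching entirely (the new DisableAPICallCache knob).
type timedCache struct {
	mu       sync.Mutex
	ttl      time.Duration
	getter   getterFunc
	disabled bool
	store    map[string]entry
}

func newTimedCache(ttl time.Duration, getter getterFunc, disabled bool) *timedCache {
	return &timedCache{ttl: ttl, getter: getter, disabled: disabled, store: map[string]entry{}}
}

func (c *timedCache) Get(key string) (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.disabled {
		// Serve a fresh cached entry without calling the getter.
		if e, ok := c.store[key]; ok && time.Since(e.updatedAt) < c.ttl {
			return e.data, nil
		}
	}
	data, err := c.getter(key)
	if err != nil {
		return nil, err
	}
	if !c.disabled {
		c.store[key] = entry{data: data, updatedAt: time.Now()}
	}
	return data, nil
}

func (c *timedCache) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.store, key)
}

func main() {
	calls := 0
	cache := newTimedCache(30*time.Second, func(key string) (interface{}, error) {
		calls++
		return "vm-" + key, nil
	}, false)

	v, _ := cache.Get("node-1")
	v, _ = cache.Get("node-1") // served from cache, getter not called again
	fmt.Println(v, "getter calls:", calls)
}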