diff --git a/charts/latest/azuredisk-csi-driver-v1.6.0.tgz b/charts/latest/azuredisk-csi-driver-v1.6.0.tgz index 514f6da467..052fbb7ffd 100644 Binary files a/charts/latest/azuredisk-csi-driver-v1.6.0.tgz and b/charts/latest/azuredisk-csi-driver-v1.6.0.tgz differ diff --git a/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml index 20abb0f757..77a04ef828 100755 --- a/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml +++ b/charts/latest/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -199,7 +199,7 @@ spec: resources: limits: cpu: 200m - memory: 300Mi + memory: 500Mi requests: cpu: 10m memory: 20Mi diff --git a/deploy/csi-azuredisk-controller.yaml b/deploy/csi-azuredisk-controller.yaml index 181f9070b9..cb620b0023 100644 --- a/deploy/csi-azuredisk-controller.yaml +++ b/deploy/csi-azuredisk-controller.yaml @@ -170,7 +170,7 @@ spec: resources: limits: cpu: 200m - memory: 300Mi + memory: 500Mi requests: cpu: 10m memory: 20Mi diff --git a/go.mod b/go.mod index d95b2d9dbe..00bbffd546 100644 --- a/go.mod +++ b/go.mod @@ -68,5 +68,5 @@ replace ( k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0 k8s.io/sample-controller => k8s.io/sample-controller v0.21.0 - sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210714144827-512c4858ba96 + sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210728061335-42cad749729e ) diff --git a/go.sum b/go.sum index 6c6fb14970..743260daf8 100644 --- a/go.sum +++ b/go.sum @@ -1207,8 +1207,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210714144827-512c4858ba96 h1:BAdNwndewnpW9h2GFBBkNg+IBvJQ+GH+2gfSd0qWfr8= -sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210714144827-512c4858ba96/go.mod h1:Y5dSIj1lXNzGSRePDD/WpB0uZb1aCwdoHew+Oqt6M90= +sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210728061335-42cad749729e h1:lP5ExSqvtrf2yIMSDvUdWNDSKYwtxpFQLyT6vrYlt4s= +sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210728061335-42cad749729e/go.mod h1:Y5dSIj1lXNzGSRePDD/WpB0uZb1aCwdoHew+Oqt6M90= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= diff --git a/vendor/modules.txt b/vendor/modules.txt index 25c9dd7ccd..20912e7096 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -874,7 +874,7 @@ k8s.io/utils/trace # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v0.7.4 => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210714144827-512c4858ba96 +# sigs.k8s.io/cloud-provider-azure v0.7.4 => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210728061335-42cad749729e ## explicit sigs.k8s.io/cloud-provider-azure/pkg/auth sigs.k8s.io/cloud-provider-azure/pkg/azureclients @@ -958,4 +958,4 @@ 
 sigs.k8s.io/yaml
 # k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0
 # k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0
 # k8s.io/sample-controller => k8s.io/sample-controller v0.21.0
-# sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210714144827-512c4858ba96
+# sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20210728061335-42cad749729e
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
index dde95b4d32..3259a7b881 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
@@ -19,10 +19,12 @@ package armclient
 import (
 	"bytes"
 	"context"
+	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"net/http"
+	"net/http/cookiejar"
 	"net/http/httputil"
 	"strings"
 	"sync"
@@ -40,6 +42,29 @@ import (
 var _ Interface = &Client{}
 
+// Singleton transport for all connections to ARM.
+var commTransport *http.Transport
+
+func init() {
+	// Use behaviour compatible with DefaultTransport, but override MaxIdleConns and MaxIdleConnsPerHost.
+	const maxIdleConns = 64
+	const maxIdleConnsPerHost = 64
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	commTransport = &http.Transport{
+		Proxy:                 defaultTransport.Proxy,
+		DialContext:           defaultTransport.DialContext,
+		MaxIdleConns:          maxIdleConns,
+		MaxIdleConnsPerHost:   maxIdleConnsPerHost,
+		IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+		TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+		ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+		TLSClientConfig: &tls.Config{
+			MinVersion:    tls.VersionTLS12,
+			Renegotiation: tls.RenegotiateNever,
+		},
+	}
+}
+
 // Client implements ARM client Interface.
 type Client struct {
 	client autorest.Client
@@ -57,6 +82,8 @@ func New(authorizer autorest.Authorizer, baseURI, userAgent, apiVersion, clientR
 	restClient.RetryAttempts = 3
 	restClient.RetryDuration = time.Second * 1
 	restClient.Authorizer = authorizer
+	restClient.Sender = getSender()
+	restClient.Sender = autorest.DecorateSender(restClient.Sender, autorest.DoCloseIfError())
 
 	if userAgent == "" {
 		restClient.UserAgent = GetUserAgent(restClient)
@@ -80,6 +107,13 @@ func New(authorizer autorest.Authorizer, baseURI, userAgent, apiVersion, clientR
 	}
 }
 
+func getSender() autorest.Sender {
+	// Setup sender with singleton transport so that connections to ARM are shared.
+	// Refer https://github.com/Azure/go-autorest/blob/master/autorest/sender.go#L128 for how the sender is created.
+	j, _ := cookiejar.New(nil)
+	return &http.Client{Jar: j, Transport: commTransport}
+}
+
 // GetUserAgent gets the autorest client with a user agent that
 // includes "kubernetes" and the full kubernetes git version string
 // example:
@@ -519,6 +553,28 @@ func (c *Client) PatchResource(ctx context.Context, resourceID string, parameter
 	return response, nil
 }
 
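The init()/getSender() change above routes every ARM client through one shared *http.Transport, so repeated calls reuse pooled TLS connections instead of re-dialing. A minimal stdlib-only sketch of the same pattern (the test server and loop are illustrative, not part of this change):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/cookiejar"
	"net/http/httptest"
)

func main() {
	// One transport shared by every client, as commTransport is shared by
	// every ARM client: idle connections are pooled per host and reused.
	shared := &http.Transport{MaxIdleConns: 64, MaxIdleConnsPerHost: 64}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	defer srv.Close()

	for i := 0; i < 3; i++ {
		jar, _ := cookiejar.New(nil) // mirrors getSender(): fresh jar, shared transport
		client := &http.Client{Jar: jar, Transport: shared}
		resp, err := client.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		io.Copy(io.Discard, resp.Body) // drain so the connection returns to the idle pool
		resp.Body.Close()
	}
}
```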
+// PatchResourceAsync patches a resource by resource ID asynchronously
+func (c *Client) PatchResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) {
+	decorators := []autorest.PrepareDecorator{
+		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
+		autorest.WithJSON(parameters),
+	}
+
+	request, err := c.PreparePatchRequest(ctx, decorators...)
+	if err != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "patch.prepare", resourceID, err)
+		return nil, retry.NewError(false, err)
+	}
+
+	future, resp, clientErr := c.SendAsync(ctx, request)
+	defer c.CloseResponse(ctx, resp)
+	if clientErr != nil {
+		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "patch.send", resourceID, clientErr.Error())
+		return nil, clientErr
+	}
+	return future, clientErr
+}
+
 // PutResourceAsync puts a resource by resource ID in async mode
 func (c *Client) PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) {
 	decorators := []autorest.PrepareDecorator{
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
index e7d437e3ec..9c6fa84717 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
@@ -77,6 +77,9 @@ type Interface interface {
 	// PatchResource patches a resource by resource ID
 	PatchResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error)
 
+	// PatchResourceAsync patches a resource by resource ID asynchronously
+	PatchResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error)
+
 	// PutResourceAsync puts a resource by resource ID in async mode
 	PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error)
 
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go
index 0230eef945..18fee8944b 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go
@@ -250,6 +250,60 @@ func (c *Client) Update(ctx context.Context, resourceGroupName string, VMName st
 	return nil
 }
 
+// UpdateAsync updates a VirtualMachine asynchronously
+func (c *Client) UpdateAsync(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (*azure.Future, *retry.Error) {
+	mc := metrics.NewMetricContext("vm", "updateasync", resourceGroupName, c.subscriptionID, source)
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(true, "VMUpdateAsync")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("VMUpdateAsync", "client throttled", c.RetryAfterWriter)
+		return nil, rerr
+	}
+
+	resourceID := armclient.GetResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/virtualMachines",
+		VMName,
+	)
+
+	future, rerr := c.armClient.PatchResourceAsync(ctx, resourceID, parameters)
+	_ = mc.Observe(rerr.Error())
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return nil, rerr
+	}
+
+	return future, nil
+}
+
+// WaitForUpdateResult waits for the response of the update request
+func (c *Client) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error {
+	mc := metrics.NewMetricContext("vm", "wait_for_update_result", resourceGroupName, c.subscriptionID, source)
+	response, err := c.armClient.WaitForAsyncOperationResult(ctx, future, "VMWaitForUpdateResult")
+	_ = mc.Observe(err)
+
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr := c.updateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: %s", "vm.put.respond", rerr.Error())
+			return rerr
+		}
+	}
+	return nil
+}
+
 // updateVM updates a VirtualMachine.
 func (c *Client) updateVM(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error {
 	resourceID := armclient.GetResourceID(
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go
index 7fd55b7ccd..917d365013 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go
@@ -20,6 +20,7 @@ import (
 	"context"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/azure"
 
 	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
 )
@@ -49,6 +50,12 @@ type Interface interface {
 	// Update updates a VirtualMachine.
 	Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error
 
+	// UpdateAsync updates a VirtualMachine asynchronously
+	UpdateAsync(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (*azure.Future, *retry.Error)
+
+	// WaitForUpdateResult waits for the response of the update request
+	WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error
+
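With the interface additions above, a VM update is split into a send phase and a wait phase, which is what later lets the disk controllers drop the per-node lock while ARM completes the operation. A hedged caller sketch (the function attachAsync and its arguments are hypothetical; only the Interface methods come from this diff):

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient"
	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)

// attachAsync issues the PATCH, then blocks only on the polling phase.
// Callers that hold a per-node lock can release it between the two calls.
func attachAsync(ctx context.Context, c vmclient.Interface, rg, vmName string,
	update compute.VirtualMachineUpdate) *retry.Error {
	// Phase 1: send the request; returns as soon as ARM accepts it.
	future, rerr := c.UpdateAsync(ctx, rg, vmName, update, "attach_disk")
	if rerr != nil {
		return rerr
	}
	// Phase 2: poll the azure.Future until the operation completes.
	return c.WaitForUpdateResult(ctx, future, rg, "attach_disk")
}
```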
Delete(ctx context.Context, resourceGroupName string, VMName string) *retry.Error } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go index 2d364ff81f..17548957bc 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go @@ -21,6 +21,7 @@ import ( "reflect" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/go-autorest/autorest/azure" "github.com/golang/mock/gomock" "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -106,6 +107,35 @@ func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMName, para return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, VMName, parameters, source) } +// UpdateAsync mocks base method +func (m *MockInterface) UpdateAsync(ctx context.Context, resourceGroupName, VMName string, parameters compute.VirtualMachineUpdate, source string) (*azure.Future, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAsync", ctx, resourceGroupName, VMName, parameters, source) + ret0, _ := ret[0].(*azure.Future) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// UpdateAsync indicates an expected call of UpdateAsync +func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMName, parameters, source interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAsync", reflect.TypeOf((*MockInterface)(nil).UpdateAsync), ctx, resourceGroupName, VMName, parameters, source) +} + +// WaitForUpdateResult waits for the response of the update request +func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// WaitForUpdateResult waits for the response of the update request +func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) +} + // Delete mocks base method func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, VMName string) *retry.Error { m.ctrl.T.Helper() diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go index a8716c6af5..5d40fc5997 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go @@ -254,6 +254,61 @@ func (c *Client) Update(ctx context.Context, resourceGroupName string, VMScaleSe return nil } +// UpdateAsync updates a VirtualMachineScaleSetVM asynchronously +func (c *Client) UpdateAsync(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, 
+	source string) (*azure.Future, *retry.Error) {
+	mc := metrics.NewMetricContext("vmssvm", "updateasync", resourceGroupName, c.subscriptionID, source)
+
+	// Report errors if the client is rate limited.
+	if !c.rateLimiterWriter.TryAccept() {
+		mc.RateLimitedCount()
+		return nil, retry.GetRateLimitError(true, "VMSSVMUpdateAsync")
+	}
+
+	// Report errors if the client is throttled.
+	if c.RetryAfterWriter.After(time.Now()) {
+		mc.ThrottledCount()
+		rerr := retry.GetThrottlingError("VMSSVMUpdateAsync", "client throttled", c.RetryAfterWriter)
+		return nil, rerr
+	}
+
+	resourceID := armclient.GetChildResourceID(
+		c.subscriptionID,
+		resourceGroupName,
+		"Microsoft.Compute/virtualMachineScaleSets",
+		VMScaleSetName,
+		"virtualMachines",
+		instanceID,
+	)
+
+	future, rerr := c.armClient.PutResourceAsync(ctx, resourceID, parameters)
+	_ = mc.Observe(rerr.Error())
+	if rerr != nil {
+		if rerr.IsThrottled() {
+			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+			c.RetryAfterWriter = rerr.RetryAfter
+		}
+
+		return nil, rerr
+	}
+
+	return future, nil
+}
+
+// WaitForUpdateResult waits for the response of the update request
+func (c *Client) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error {
+	mc := metrics.NewMetricContext("vmss", "wait_for_update_result", resourceGroupName, c.subscriptionID, source)
+	response, err := c.armClient.WaitForAsyncOperationResult(ctx, future, "VMSSWaitForUpdateResult")
+	_ = mc.Observe(err)
+	if response != nil && response.StatusCode != http.StatusNoContent {
+		_, rerr := c.updateResponder(response)
+		if rerr != nil {
+			klog.V(5).Infof("Received error in %s: %s", "vmss.put.respond", rerr.Error())
+			return rerr
+		}
+	}
+	return nil
+}
+
 // updateVMSSVM updates a VirtualMachineScaleSetVM.
 func (c *Client) updateVMSSVM(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) *retry.Error {
 	resourceID := armclient.GetChildResourceID(
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go
index 165e332152..dd458c894f 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go
@@ -20,6 +20,7 @@ import (
 	"context"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/azure"
 
 	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
 )
@@ -46,6 +47,12 @@ type Interface interface {
 	// Update updates a VirtualMachineScaleSetVM.
 	Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error
 
+	// UpdateAsync updates a VirtualMachineScaleSetVM asynchronously
+	UpdateAsync(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*azure.Future, *retry.Error)
+
+	// WaitForUpdateResult waits for the response of the update request
+	WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error
+
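The vmssvmclient gains the same UpdateAsync/WaitForUpdateResult pair, and the regenerated mocks in this diff let tests script each phase separately. A sketch of such a test against the vmclient mock shown earlier (test name and expected values are illustrative):

```go
package example

import (
	"context"
	"testing"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/golang/mock/gomock"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient"
)

func TestAsyncUpdateFlow(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mock := mockvmclient.NewMockInterface(ctrl)
	// Script the two phases separately: the send hands back a future,
	// and the wait resolves it without error.
	mock.EXPECT().
		UpdateAsync(gomock.Any(), "rg", "vm-0", gomock.Any(), "attach_disk").
		Return(&azure.Future{}, nil)
	mock.EXPECT().
		WaitForUpdateResult(gomock.Any(), gomock.Any(), "rg", "attach_disk").
		Return(nil)

	future, rerr := mock.UpdateAsync(context.Background(), "rg", "vm-0", compute.VirtualMachineUpdate{}, "attach_disk")
	if rerr != nil {
		t.Fatalf("UpdateAsync: %v", rerr)
	}
	if rerr := mock.WaitForUpdateResult(context.Background(), future, "rg", "attach_disk"); rerr != nil {
		t.Fatalf("WaitForUpdateResult: %v", rerr)
	}
}
```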
 	// UpdateVMs updates a list of VirtualMachineScaleSetVM from map[instanceID]compute.VirtualMachineScaleSetVM.
 	UpdateVMs(ctx context.Context, resourceGroupName string, VMScaleSetName string, instances map[string]compute.VirtualMachineScaleSetVM, source string) *retry.Error
 }
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go
index 30d240d30b..259bc748d4 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go
@@ -21,6 +21,7 @@ import (
 	"reflect"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/golang/mock/gomock"
 
 	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
 )
@@ -92,6 +93,35 @@ func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMScaleSetNa
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source)
 }
 
+// UpdateAsync mocks base method
+func (m *MockInterface) UpdateAsync(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*azure.Future, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdateAsync", ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source)
+	ret0, _ := ret[0].(*azure.Future)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// UpdateAsync indicates an expected call of UpdateAsync
+func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAsync", reflect.TypeOf((*MockInterface)(nil).UpdateAsync), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source)
+}
+
+// WaitForUpdateResult mocks base method
+func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source)
+	ret0, _ := ret[0].(*retry.Error)
+	return ret0
+}
+
+// WaitForUpdateResult indicates an expected call of WaitForUpdateResult
+func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source)
+}
+
 // UpdateVMs mocks base method
 func (m *MockInterface) UpdateVMs(ctx context.Context, resourceGroupName, VMScaleSetName string, instances map[string]compute.VirtualMachineScaleSetVM, source string) *retry.Error {
 	m.ctrl.T.Helper()
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
index 644082e8a9..9b5afdc102 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
@@ -41,6 +41,7 @@ import (
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
+
"k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" @@ -876,6 +877,14 @@ func initDiskControllers(az *Cloud) error { // Common controller contains the function // needed by both blob disk and managed disk controllers + qps := float32(defaultAtachDetachDiskQPS) + bucket := defaultAtachDetachDiskBucket + if az.Config.AttachDetachDiskRateLimit != nil { + qps = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitQPSWrite + bucket = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitBucketWrite + } + klog.V(2).Infof("attach/detach disk operation rate limit QPS: %f, Bucket: %d", qps, bucket) + common := &controllerCommon{ location: az.Location, storageEndpointSuffix: az.Environment.StorageEndpointSuffix, @@ -883,6 +892,7 @@ func initDiskControllers(az *Cloud) error { subscriptionID: az.SubscriptionID, cloud: az, lockMap: newLockMap(), + diskOpRateLimiter: flowcontrol.NewTokenBucketRateLimiter(qps, bucket), } if az.HasExtendedLocation() { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index 154dbed1db..d96f203ee8 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/types" kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" volerr "k8s.io/cloud-provider/volume/errors" "k8s.io/klog/v2" @@ -95,6 +96,8 @@ type controllerCommon struct { // > attachDiskMap sync.Map detachDiskMap sync.Map + // attach/detach disk rate limiter + diskOpRateLimiter flowcontrol.RateLimiter } // AttachDiskOptions attach disk options @@ -162,13 +165,13 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri return -1, err } if strings.EqualFold(string(nodeName), string(attachedNode)) { - err := fmt.Errorf("volume %q is actually attached to current node %q, invalidate vm cache and return error", diskURI, nodeName) - klog.Warningf("%v", err) + klog.Warningf("volume %q is actually attached to current node %q, invalidate vm cache and return error", diskURI, nodeName) // update VM(invalidate vm cache) if errUpdate := c.UpdateVM(nodeName); errUpdate != nil { return -1, errUpdate } - return -1, err + lun, _, err := c.GetDiskLun(diskName, diskURI, nodeName) + return lun, err } attachErr := fmt.Sprintf( @@ -219,7 +222,13 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri } c.lockMap.LockEntry(node) - defer c.lockMap.UnlockEntry(node) + unlock := false + defer func() { + if !unlock { + c.lockMap.UnlockEntry(node) + } + }() + diskMap, err := c.cleanAttachDiskRequests(node) if err != nil { return -1, err @@ -241,7 +250,28 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri } c.diskStateMap.Store(disk, "attaching") defer c.diskStateMap.Delete(disk) - return lun, vmset.AttachDisk(nodeName, diskMap) + future, err := vmset.AttachDisk(nodeName, diskMap) + if err != nil { + return -1, err + } + + if c.diskOpRateLimiter.TryAccept() { + // unlock and wait for attach disk complete + unlock = true + c.lockMap.UnlockEntry(node) + } else { + klog.Warningf("azureDisk - switch to batch operation since disk operation is rate limited, current QPS: %f", c.diskOpRateLimiter.QPS()) + } + + ctx, cancel := getContextWithCancel() + defer cancel() + + 
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		return -1, err
+	}
+
+	return lun, vmset.WaitForUpdateResult(ctx, future, resourceGroup, "attach_disk")
 }
 
 func (c *controllerCommon) insertAttachDiskRequest(diskURI, nodeName string, options *AttachDiskOptions) error {
@@ -289,7 +319,9 @@
 
 // DetachDisk detaches a disk from VM
 func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
-	_, err := c.cloud.InstanceID(context.TODO(), nodeName)
+	ctx, cancel := getContextWithCancel()
+	defer cancel()
+	_, err := c.cloud.InstanceID(ctx, nodeName)
 	if err != nil {
 		if errors.Is(err, cloudprovider.InstanceNotFound) {
 			// if host doesn't exist, no need to detach
@@ -308,7 +340,12 @@
 	}
 	c.lockMap.LockEntry(node)
-	defer c.lockMap.UnlockEntry(node)
+	unlock := false
+	defer func() {
+		if !unlock {
+			c.lockMap.UnlockEntry(node)
+		}
+	}()
 	diskMap, err := c.cleanDetachDiskRequests(node)
 	if err != nil {
 		return err
 	}
@@ -322,7 +359,8 @@
 	}
 	c.diskStateMap.Store(disk, "detaching")
 	defer c.diskStateMap.Delete(disk)
-	if err = vmset.DetachDisk(nodeName, diskMap); err != nil {
+	future, err := vmset.DetachDisk(nodeName, diskMap)
+	if err != nil {
 		if isInstanceNotFoundError(err) {
 			// if host doesn't exist, no need to detach
 			klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached",
@@ -332,6 +370,22 @@
 		klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
 		return err
 	}
+	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
+	if err != nil {
+		return err
+	}
+	if c.diskOpRateLimiter.TryAccept() {
+		// unlock and wait for detach disk complete
+		unlock = true
+		c.lockMap.UnlockEntry(node)
+	} else {
+		klog.Warningf("azureDisk - switch to batch operation since disk operation is rate limited, current QPS: %f", c.diskOpRateLimiter.QPS())
+	}
+
+	if err := vmset.WaitForUpdateResult(ctx, future, resourceGroup, "detach_disk"); err != nil {
+		klog.Errorf("azureDisk - detach disk(%s, %s) failed with error: %v", diskName, diskURI, err)
+		return err
+	}
 	}
 
 	klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
index 225ccfd136..b0d6a7910b 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go
@@ -17,10 +17,12 @@ limitations under the License.
package provider import ( + "context" "net/http" "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" + "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" @@ -31,16 +33,16 @@ import ( ) // AttachDisk attaches a disk to vm -func (as *availabilitySet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { +func (as *availabilitySet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) { vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault) if err != nil { - return err + return nil, err } vmName := mapNodeNameToVMName(nodeName) nodeResourceGroup, err := as.GetNodeResourceGroup(vmName) if err != nil { - return err + return nil, err } disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) @@ -114,37 +116,45 @@ func (as *availabilitySet) AttachDisk(nodeName types.NodeName, diskMap map[strin _ = as.cloud.vmCache.Delete(vmName) }() - rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") + future, rerr := as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") if rerr != nil { klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) disks := as.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks - rerr = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") + future, rerr = as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") } } klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) if rerr != nil { + return future, rerr.Error() + } + return future, nil +} + +// WaitForUpdateResult waits for the response of the update request +func (as *availabilitySet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error { + if rerr := as.VirtualMachinesClient.WaitForUpdateResult(ctx, future, resourceGroupName, source); rerr != nil { return rerr.Error() } return nil } // DetachDisk detaches a disk from VM -func (as *availabilitySet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) error { +func (as *availabilitySet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) (*azure.Future, error) { vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault) if err != nil { // if host doesn't exist, no need to detach klog.Warningf("azureDisk - cannot find node %s, skip detaching disk list(%s)", nodeName, diskMap) - return nil + return nil, nil } vmName := mapNodeNameToVMName(nodeName) nodeResourceGroup, err := as.GetNodeResourceGroup(vmName) if err != nil { - return err + return nil, err } disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) @@ -196,22 +206,22 @@ func (as *availabilitySet) DetachDisk(nodeName types.NodeName, diskMap map[strin _ = as.cloud.vmCache.Delete(vmName) }() - rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk") + future, rerr := as.VirtualMachinesClient.UpdateAsync(ctx, 
nodeResourceGroup, vmName, newVM, "detach_disk")
 	if rerr != nil {
 		klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr)
 		if rerr.HTTPStatusCode == http.StatusNotFound {
 			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName)
 			disks := as.filterNonExistingDisks(ctx, *vm.StorageProfile.DataDisks)
 			newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
-			rerr = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
+			future, rerr = as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
 		}
 	}
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr)
 	if rerr != nil {
-		return rerr.Error()
+		return future, rerr.Error()
 	}
-	return nil
+	return future, nil
 }
 
 // UpdateVM updates a vm
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
index 31c262ac98..2dd73ed4f5 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go
@@ -17,10 +17,12 @@ limitations under the License.
 package provider
 
 import (
+	"context"
 	"net/http"
 	"strings"
 
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
+	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/Azure/go-autorest/autorest/to"
 
 	"k8s.io/apimachinery/pkg/types"
@@ -31,16 +33,16 @@ )
 
 // AttachDisk attaches a disk to vm
-func (ss *ScaleSet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error {
+func (ss *ScaleSet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) {
 	vmName := mapNodeNameToVMName(nodeName)
 	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	disks := []compute.DataDisk{}
@@ -118,35 +120,43 @@ }()
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, nodeName, diskMap)
-	rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
+	future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
 	if rerr != nil {
 		klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr)
 		if rerr.HTTPStatusCode == http.StatusNotFound {
 			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName)
 			disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
 			newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
-			rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
+			future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
 		}
 	}
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr)
 	if rerr != nil {
+		return future, rerr.Error()
+	}
+	return future, nil
+}
+
+// WaitForUpdateResult waits for the response of the update request
+func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error {
+	if rerr := ss.VirtualMachineScaleSetVMsClient.WaitForUpdateResult(ctx, future, resourceGroupName, source); rerr != nil {
 		return rerr.Error()
 	}
 	return nil
 }
 
 // DetachDisk detaches a disk from VM
-func (ss *ScaleSet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) error {
+func (ss *ScaleSet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) (*azure.Future, error) {
 	vmName := mapNodeNameToVMName(nodeName)
 	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	disks := []compute.DataDisk{}
@@ -201,22 +211,22 @@ }()
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, nodeName, diskMap)
-	rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
+	future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
 	if rerr != nil {
 		klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr)
 		if rerr.HTTPStatusCode == http.StatusNotFound {
 			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName)
 			disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
 			newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
-			rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
+			future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
 		}
 	}
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%v) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr)
 	if rerr != nil {
-		return rerr.Error()
+		return future, rerr.Error()
 	}
-	return nil
+	return future, nil
 }
 
 // UpdateVM updates a vm
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
index b940ff8078..16d1af274e 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
@@ -1031,6 +1031,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 		pip.Name = to.StringPtr(pipName)
 		pip.Location = to.StringPtr(az.Location)
 		if az.HasExtendedLocation() {
+			klog.V(2).Infof("Using extended location with name %s, and type %s for PIP", az.ExtendedLocationName, az.ExtendedLocationType)
 			pip.ExtendedLocation = &network.ExtendedLocation{
 				Name: &az.ExtendedLocationName,
 				Type: getExtendedLocationTypeFromString(az.ExtendedLocationType),
@@ -1053,13 +1054,16 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 			Name: network.PublicIPAddressSkuNameStandard,
 		}
 
-		// only add zone information for the new
standard pips - zones, err := az.getRegionZonesBackoff(to.String(pip.Location)) - if err != nil { - return nil, err - } - if len(zones) > 0 { - pip.Zones = &zones + // skip adding zone info since edge zones doesn't support multiple availability zones. + if !az.HasExtendedLocation() { + // only add zone information for the new standard pips + zones, err := az.getRegionZonesBackoff(to.String(pip.Location)) + if err != nil { + return nil, err + } + if len(zones) > 0 { + pip.Zones = &zones + } } } klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) @@ -1782,13 +1786,6 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv // construct FrontendIPConfigurationPropertiesFormat var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat if isInternal { - // azure does not support ILB for IPv6 yet. - // TODO: remove this check when ILB supports IPv6 *and* the SDK - // have been rev'ed to 2019* version - if utilnet.IsIPv6String(service.Spec.ClusterIP) { - return nil, false, fmt.Errorf("ensure(%s): lb(%s) - internal load balancers does not support IPv6", serviceName, lbName) - } - subnetName := subnet(service) if subnetName == nil { subnetName = &az.SubnetName @@ -1806,6 +1803,10 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv Subnet: &subnet, } + if utilnet.IsIPv6String(service.Spec.ClusterIP) { + configProperties.PrivateIPAddressVersion = network.IPVersionIPv6 + } + loadBalancerIP := service.Spec.LoadBalancerIP if loadBalancerIP != "" { configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic @@ -1837,13 +1838,13 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties, } - // only add zone information for new internal frontend IP configurations + // only add zone information for new internal frontend IP configurations for standard load balancer not deployed to an edge zone. location := az.Location zones, err := az.getRegionZonesBackoff(location) if err != nil { return nil, false, err } - if isInternal && az.useStandardLoadBalancer() && len(zones) > 0 { + if isInternal && az.useStandardLoadBalancer() && len(zones) > 0 && !az.HasExtendedLocation() { newConfig.Zones = &zones } newConfigs = append(newConfigs, newConfig) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go deleted file mode 100644 index d99a533b27..0000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go +++ /dev/null @@ -1,368 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package provider - -import ( - "reflect" - - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute" - network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network" - "github.com/golang/mock/gomock" - v1 "k8s.io/api/core/v1" - types "k8s.io/apimachinery/pkg/types" - cloudprovider "k8s.io/cloud-provider" - - cache "sigs.k8s.io/cloud-provider-azure/pkg/cache" -) - -// MockVMSet is a mock of VMSet interface -type MockVMSet struct { - ctrl *gomock.Controller - recorder *MockVMSetMockRecorder -} - -// MockVMSetMockRecorder is the mock recorder for MockVMSet -type MockVMSetMockRecorder struct { - mock *MockVMSet -} - -// NewMockVMSet creates a new mock instance -func NewMockVMSet(ctrl *gomock.Controller) *MockVMSet { - mock := &MockVMSet{ctrl: ctrl} - mock.recorder = &MockVMSetMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockVMSet) EXPECT() *MockVMSetMockRecorder { - return m.recorder -} - -// GetInstanceIDByNodeName mocks base method -func (m *MockVMSet) GetInstanceIDByNodeName(name string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetInstanceIDByNodeName", name) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetInstanceIDByNodeName indicates an expected call of GetInstanceIDByNodeName -func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIDByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceIDByNodeName), name) -} - -// GetInstanceTypeByNodeName mocks base method -func (m *MockVMSet) GetInstanceTypeByNodeName(name string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetInstanceTypeByNodeName", name) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetInstanceTypeByNodeName indicates an expected call of GetInstanceTypeByNodeName -func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceTypeByNodeName), name) -} - -// GetIPByNodeName mocks base method -func (m *MockVMSet) GetIPByNodeName(name string) (string, string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetIPByNodeName", name) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetIPByNodeName indicates an expected call of GetIPByNodeName -func (mr *MockVMSetMockRecorder) GetIPByNodeName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetIPByNodeName), name) -} - -// GetPrimaryInterface mocks base method -func (m *MockVMSet) GetPrimaryInterface(nodeName string) (network.Interface, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPrimaryInterface", nodeName) - ret0, _ := ret[0].(network.Interface) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPrimaryInterface indicates an expected call of GetPrimaryInterface -func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryInterface", 
reflect.TypeOf((*MockVMSet)(nil).GetPrimaryInterface), nodeName) -} - -// GetNodeNameByProviderID mocks base method -func (m *MockVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNodeNameByProviderID", providerID) - ret0, _ := ret[0].(types.NodeName) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNodeNameByProviderID indicates an expected call of GetNodeNameByProviderID -func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByProviderID), providerID) -} - -// GetZoneByNodeName mocks base method -func (m *MockVMSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetZoneByNodeName", name) - ret0, _ := ret[0].(cloudprovider.Zone) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetZoneByNodeName indicates an expected call of GetZoneByNodeName -func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetZoneByNodeName), name) -} - -// GetPrimaryVMSetName mocks base method -func (m *MockVMSet) GetPrimaryVMSetName() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPrimaryVMSetName") - ret0, _ := ret[0].(string) - return ret0 -} - -// GetPrimaryVMSetName indicates an expected call of GetPrimaryVMSetName -func (mr *MockVMSetMockRecorder) GetPrimaryVMSetName() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryVMSetName)) -} - -// GetVMSetNames mocks base method -func (m *MockVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetVMSetNames", service, nodes) - ret0, _ := ret[0].(*[]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetVMSetNames indicates an expected call of GetVMSetNames -func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetVMSetNames), service, nodes) -} - -// EnsureHostsInPool mocks base method -func (m *MockVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName string, isInternal bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnsureHostsInPool", service, nodes, backendPoolID, vmSetName, isInternal) - ret0, _ := ret[0].(error) - return ret0 -} - -// EnsureHostsInPool indicates an expected call of EnsureHostsInPool -func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName, isInternal interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName, isInternal) -} - -// EnsureHostInPool mocks base method -func (m *MockVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnsureHostInPool", 
service, nodeName, backendPoolID, vmSetName, isInternal) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(string) - ret3, _ := ret[3].(*compute.VirtualMachineScaleSetVM) - ret4, _ := ret[4].(error) - return ret0, ret1, ret2, ret3, ret4 -} - -// EnsureHostInPool indicates an expected call of EnsureHostInPool -func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName, isInternal interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostInPool), service, nodeName, backendPoolID, vmSetName, isInternal) -} - -// EnsureBackendPoolDeleted mocks base method -func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools) - ret0, _ := ret[0].(error) - return ret0 -} - -// EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted -func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools) -} - -// EnsureBackendPoolDeletedFromVMSets mocks base method -func (m *MockVMSet) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnsureBackendPoolDeletedFromVMSets", vmSetNamesMap, backendPoolID) - ret0, _ := ret[0].(error) - return ret0 -} - -// EnsureBackendPoolDeletedFromVMSets indicates an expected call of EnsureBackendPoolDeletedFromVMSets -func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeletedFromVMSets", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeletedFromVMSets), vmSetNamesMap, backendPoolID) -} - -// AttachDisk mocks base method -func (m *MockVMSet) AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AttachDisk", nodeName, diskMap) - ret0, _ := ret[0].(error) - return ret0 -} - -// AttachDisk indicates an expected call of AttachDisk -func (mr *MockVMSetMockRecorder) AttachDisk(nodeName, diskMap interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDisk", reflect.TypeOf((*MockVMSet)(nil).AttachDisk), nodeName, diskMap) -} - -// DetachDisk mocks base method -func (m *MockVMSet) DetachDisk(nodeName types.NodeName, diskMap map[string]string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DetachDisk", nodeName, diskMap) - ret0, _ := ret[0].(error) - return ret0 -} - -// DetachDisk indicates an expected call of DetachDisk -func (mr *MockVMSetMockRecorder) DetachDisk(nodeName, diskMap interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachDisk", reflect.TypeOf((*MockVMSet)(nil).DetachDisk), nodeName, diskMap) -} - -// GetDataDisks mocks base method -func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, 
cacheType cache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataDisks", nodeName, cacheType) - ret0, _ := ret[0].([]compute.DataDisk) - ret1, _ := ret[1].(*string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetDataDisks indicates an expected call of GetDataDisks -func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, string interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataDisks", reflect.TypeOf((*MockVMSet)(nil).GetDataDisks), nodeName, string) -} - -// UpdateVM mocks base method -func (m *MockVMSet) UpdateVM(nodeName types.NodeName) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateVM", nodeName) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateVM indicates an expected call of UpdateVM -func (mr *MockVMSetMockRecorder) UpdateVM(nodeName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVM", reflect.TypeOf((*MockVMSet)(nil).UpdateVM), nodeName) -} - -// GetPowerStatusByNodeName mocks base method -func (m *MockVMSet) GetPowerStatusByNodeName(name string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPowerStatusByNodeName", name) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPowerStatusByNodeName indicates an expected call of GetPowerStatusByNodeName -func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerStatusByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPowerStatusByNodeName), name) -} - -// GetPrivateIPsByNodeName mocks base method -func (m *MockVMSet) GetPrivateIPsByNodeName(name string) ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPrivateIPsByNodeName", name) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPrivateIPsByNodeName indicates an expected call of GetPrivateIPsByNodeName -func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateIPsByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPrivateIPsByNodeName), name) -} - -// GetNodeNameByIPConfigurationID mocks base method -func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNodeNameByIPConfigurationID", ipConfigurationID) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID -func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByIPConfigurationID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByIPConfigurationID), ipConfigurationID) -} - -// GetNodeCIDRMasksByProviderID mocks base method -func (m *MockVMSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNodeCIDRMasksByProviderID", providerID) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetNodeCIDRMasksByProviderID indicates an expected 
-func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeCIDRMasksByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeCIDRMasksByProviderID), providerID)
-}
-
-// GetAgentPoolVMSetNames mocks base method
-func (m *MockVMSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetAgentPoolVMSetNames", nodes)
-	ret0, _ := ret[0].(*[]string)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetAgentPoolVMSetNames indicates an expected call of GetAgentPoolVMSetNames
-func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAgentPoolVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetAgentPoolVMSetNames), nodes)
-}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
index 94e6acfb91..b9f9206471 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_ratelimit.go
@@ -21,6 +21,11 @@ import (
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
 )
 
+const (
+	defaultAtachDetachDiskQPS    = 6.0
+	defaultAtachDetachDiskBucket = 10
+)
+
 // CloudProviderRateLimitConfig indicates the rate limit config for each clients.
 type CloudProviderRateLimitConfig struct {
 	// The default rate limit config options.
@@ -41,6 +46,7 @@ type CloudProviderRateLimitConfig struct {
 	VirtualMachineScaleSetRateLimit *azclients.RateLimitConfig `json:"virtualMachineScaleSetRateLimit,omitempty" yaml:"virtualMachineScaleSetRateLimit,omitempty"`
 	VirtualMachineSizeRateLimit     *azclients.RateLimitConfig `json:"virtualMachineSizesRateLimit,omitempty" yaml:"virtualMachineSizesRateLimit,omitempty"`
 	AvailabilitySetRateLimit        *azclients.RateLimitConfig `json:"availabilitySetRateLimit,omitempty" yaml:"availabilitySetRateLimit,omitempty"`
+	AttachDetachDiskRateLimit       *azclients.RateLimitConfig `json:"attachDetachDiskRateLimit,omitempty" yaml:"attachDetachDiskRateLimit,omitempty"`
 }
 
 // InitializeCloudProviderRateLimitConfig initializes rate limit configs.
@@ -78,6 +84,13 @@ func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig
 	config.VirtualMachineScaleSetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineScaleSetRateLimit)
 	config.VirtualMachineSizeRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineSizeRateLimit)
 	config.AvailabilitySetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.AvailabilitySetRateLimit)
+
+	atachDetachDiskRateLimitConfig := azclients.RateLimitConfig{
+		CloudProviderRateLimit:            true,
+		CloudProviderRateLimitQPSWrite:    defaultAtachDetachDiskQPS,
+		CloudProviderRateLimitBucketWrite: defaultAtachDetachDiskBucket,
+	}
+	config.AttachDetachDiskRateLimit = overrideDefaultRateLimitConfig(&atachDetachDiskRateLimitConfig, config.AttachDetachDiskRateLimit)
 }
 
 // overrideDefaultRateLimitConfig overrides the default CloudProviderRateLimitConfig.
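Note on the azure_ratelimit.go hunk above: attach/detach disk calls now get their own built-in rate limit (6 write QPS with a bucket of 10) rather than inheriting the global default, while an explicit attachDetachDiskRateLimit entry in the cloud config still takes precedence. A minimal sketch of that precedence, using a simplified stand-in for azclients.RateLimitConfig rather than the vendored types:

```go
package main

import "fmt"

// RateLimitConfig is a simplified stand-in for azclients.RateLimitConfig;
// only the write-side fields relevant to the attach/detach default are kept.
type RateLimitConfig struct {
	CloudProviderRateLimit            bool
	CloudProviderRateLimitQPSWrite    float32
	CloudProviderRateLimitBucketWrite int
}

// overrideDefault mirrors the shape of overrideDefaultRateLimitConfig:
// a per-client config, when present, wins over the supplied default.
func overrideDefault(defaults, config *RateLimitConfig) *RateLimitConfig {
	if config == nil {
		return defaults
	}
	return config
}

func main() {
	// Built-in default for disk attach/detach: 6 QPS, bucket size 10.
	attachDetachDefault := &RateLimitConfig{
		CloudProviderRateLimit:            true,
		CloudProviderRateLimitQPSWrite:    6.0,
		CloudProviderRateLimitBucketWrite: 10,
	}

	// No attachDetachDiskRateLimit in the cloud config: the default applies.
	fmt.Printf("%+v\n", *overrideDefault(attachDetachDefault, nil))

	// attachDetachDiskRateLimit set in the cloud config: the user value wins.
	user := &RateLimitConfig{
		CloudProviderRateLimit:            true,
		CloudProviderRateLimitQPSWrite:    2.0,
		CloudProviderRateLimitBucketWrite: 5,
	}
	fmt.Printf("%+v\n", *overrideDefault(attachDetachDefault, user))
}
```

The point of seeding a dedicated default (rather than reusing the global RateLimitConfig) is that disk attach/detach traffic can be throttled independently of the other ARM clients.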
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
index a995e33bc7..7af37cf032 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
@@ -368,8 +368,6 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv
 			return true, isPrimaryService, nil
 		}
 		klog.V(4).Infof("serviceOwnsFrontendIP: the public IP with ID %s is being referenced by other service with public IP address %s", *pip.ID, *pip.IPAddress)
-
-		return false, isPrimaryService, nil
 	}
 
 	return false, isPrimaryService, nil
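Note on the azure_standard.go hunk above: the deleted `return false, isPrimaryService, nil` sat inside the loop over public IPs, so the scan stopped at the first PIP that did not match. With it removed, the loop checks every candidate and only the final return reports "not owned". A stripped-down sketch of the corrected control flow (a hypothetical helper, not the vendored function):

```go
package main

import "fmt"

// ownsAny sketches the corrected scan: every candidate is checked before
// concluding the service owns none of them. Before the fix, an early
// "return false" inside the loop skipped the remaining candidates.
func ownsAny(candidates []string, owned map[string]bool) bool {
	for _, id := range candidates {
		if owned[id] {
			return true // a match still returns immediately
		}
		// previously: return false here, ending the scan too early
	}
	return false // only after the whole list has been examined
}

func main() {
	owned := map[string]bool{"pip-b": true}
	// true with the fix; the pre-fix logic would have returned false at "pip-a".
	fmt.Println(ownsAny([]string{"pip-a", "pip-b"}, owned))
}
```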
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
index 00e6a424a5..cfc981fa04 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
@@ -123,6 +123,7 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount
 	if accountOptions == nil {
 		return "", "", fmt.Errorf("account options is nil")
 	}
+
 	accountName := accountOptions.Name
 	accountType := accountOptions.Type
 	accountKind := accountOptions.Kind
@@ -212,10 +213,10 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount
 		if az.StorageAccountClient == nil {
 			return "", "", fmt.Errorf("StorageAccountClient is nil")
 		}
+
 		ctx, cancel := getContextWithCancel()
 		defer cancel()
-		rerr := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp)
-		if rerr != nil {
+		if rerr := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp); rerr != nil {
 			return "", "", fmt.Errorf("failed to create storage account %s, error: %v", accountName, rerr)
 		}
@@ -235,33 +236,37 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount
 		}
 
 		if accountOptions.CreatePrivateEndpoint {
+			vnetResourceGroup := az.ResourceGroup
+			if len(az.VnetResourceGroup) > 0 {
+				vnetResourceGroup = az.VnetResourceGroup
+			}
 			// Get properties of the storageAccount
-			storageAccount, err := az.StorageAccountClient.GetProperties(ctx, az.ResourceGroup, accountName)
+			storageAccount, err := az.StorageAccountClient.GetProperties(ctx, resourceGroup, accountName)
 			if err != nil {
-				return "", "", fmt.Errorf("Failed to get the properties of storage account(%s), resourceGroup(%s), error: %v", accountName, az.ResourceGroup, err)
+				return "", "", fmt.Errorf("Failed to get the properties of storage account(%s), resourceGroup(%s), error: %v", accountName, resourceGroup, err)
 			}
 
 			// Create private endpoint
 			privateEndpointName := accountName + "-pvtendpoint"
-			if err := az.createPrivateEndpoint(ctx, accountName, storageAccount.ID, privateEndpointName); err != nil {
-				return "", "", fmt.Errorf("Failed to create private endpoint for storage account(%s), resourceGroup(%s), error: %v", accountName, az.ResourceGroup, err)
+			if err := az.createPrivateEndpoint(ctx, accountName, storageAccount.ID, privateEndpointName, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create private endpoint for storage account(%s), resourceGroup(%s), error: %v", accountName, vnetResourceGroup, err)
 			}
 
 			// Create DNS zone
-			if err := az.createPrivateDNSZone(ctx); err != nil {
-				return "", "", fmt.Errorf("Failed to create private DNS zone(%s) in resourceGroup(%s), error: %v", PrivateDNSZoneName, az.ResourceGroup, err)
+			if err := az.createPrivateDNSZone(ctx, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create private DNS zone(%s) in resourceGroup(%s), error: %v", PrivateDNSZoneName, vnetResourceGroup, err)
 			}
 
 			// Create virtual link to the zone private DNS zone
 			vNetLinkName := accountName + "-vnetlink"
-			if err := az.createVNetLink(ctx, vNetLinkName); err != nil {
-				return "", "", fmt.Errorf("Failed to create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s), error: %v", az.VnetName, PrivateDNSZoneName, az.ResourceGroup, err)
+			if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s), error: %v", az.VnetName, PrivateDNSZoneName, vnetResourceGroup, err)
 			}
 
 			// Create dns zone group
 			dnsZoneGroupName := accountName + "-dnszonegroup"
-			if err := az.createPrivateDNSZoneGroup(ctx, dnsZoneGroupName, privateEndpointName); err != nil {
-				return "", "", fmt.Errorf("Failed to create private DNS zone group - privateEndpoint(%s), vNetName(%s), resourceGroup(%s), error: %v", privateEndpointName, az.VnetName, az.ResourceGroup, err)
+			if err := az.createPrivateDNSZoneGroup(ctx, dnsZoneGroupName, privateEndpointName, vnetResourceGroup); err != nil {
+				return "", "", fmt.Errorf("Failed to create private DNS zone group - privateEndpoint(%s), vNetName(%s), resourceGroup(%s), error: %v", privateEndpointName, az.VnetName, vnetResourceGroup, err)
 			}
 		}
 	}
@@ -276,17 +281,16 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount
 	return accountName, accountKey, nil
 }
 
-func (az *Cloud) createPrivateEndpoint(ctx context.Context, accountName string, accountID *string, privateEndpointName string) error {
+func (az *Cloud) createPrivateEndpoint(ctx context.Context, accountName string, accountID *string, privateEndpointName, vnetResourceGroup string) error {
 	klog.V(2).Infof("Creating private endpoint(%s) for account (%s)", privateEndpointName, accountName)
 
-	subnet, rerr := az.SubnetsClient.Get(ctx, az.ResourceGroup, az.VnetName, az.SubnetName, "")
-	if rerr != nil {
-		return rerr.Error()
-	}
+	subnet, _, err := az.getSubnet(az.VnetName, az.SubnetName)
+	if err != nil {
+		return err
+	}
 	// Disable the private endpoint network policies before creating private endpoint
 	subnet.SubnetPropertiesFormat.PrivateEndpointNetworkPolicies = network.VirtualNetworkPrivateEndpointNetworkPoliciesDisabled
-	rerr = az.SubnetsClient.CreateOrUpdate(ctx, az.ResourceGroup, az.VnetName, az.SubnetName, subnet)
-	if rerr != nil {
+	if rerr := az.SubnetsClient.CreateOrUpdate(ctx, vnetResourceGroup, az.VnetName, az.SubnetName, subnet); rerr != nil {
 		return rerr.Error()
 	}
@@ -304,42 +308,39 @@ func (az *Cloud) createPrivateEndpoint(ctx context.Context, accountName string,
 		Location: &az.Location,
 		PrivateEndpointProperties: &network.PrivateEndpointProperties{Subnet: &subnet, PrivateLinkServiceConnections: &privateLinkServiceConnections},
 	}
-	if err := az.privateendpointclient.CreateOrUpdate(ctx, az.ResourceGroup, privateEndpointName, privateEndpoint, true); err != nil {
-		return err
-	}
-	return nil
+	return az.privateendpointclient.CreateOrUpdate(ctx, vnetResourceGroup, privateEndpointName, privateEndpoint, true)
 }
 
-func (az *Cloud) createPrivateDNSZone(ctx context.Context) error {
-	klog.V(2).Infof("Creating private dns zone(%s) in resourceGroup (%s)", PrivateDNSZoneName, az.ResourceGroup)
+func (az *Cloud) createPrivateDNSZone(ctx context.Context, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating private dns zone(%s) in resourceGroup (%s)", PrivateDNSZoneName, vnetResourceGroup)
 	location := LocationGlobal
 	privateDNSZone := privatedns.PrivateZone{Location: &location}
-	if err := az.privatednsclient.CreateOrUpdate(ctx, az.ResourceGroup, PrivateDNSZoneName, privateDNSZone, true); err != nil {
+	if err := az.privatednsclient.CreateOrUpdate(ctx, vnetResourceGroup, PrivateDNSZoneName, privateDNSZone, true); err != nil {
+		if strings.Contains(err.Error(), "exists already") {
+			klog.V(2).Infof("private dns zone(%s) in resourceGroup (%s) already exists", PrivateDNSZoneName, vnetResourceGroup)
+			return nil
+		}
 		return err
 	}
 	return nil
 }
 
-func (az *Cloud) createVNetLink(ctx context.Context, vNetLinkName string) error {
-	klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, PrivateDNSZoneName, az.ResourceGroup)
+func (az *Cloud) createVNetLink(ctx context.Context, vNetLinkName, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, PrivateDNSZoneName, vnetResourceGroup)
 	location := LocationGlobal
-	vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s", az.SubscriptionID, az.ResourceGroup, az.VnetName)
-	registrationEnabled := false
+	vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s", az.SubscriptionID, vnetResourceGroup, az.VnetName)
 	parameters := privatedns.VirtualNetworkLink{
 		Location: &location,
 		VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{
 			VirtualNetwork: &privatedns.SubResource{ID: &vnetID},
-			RegistrationEnabled: &registrationEnabled},
-	}
-	if err := az.virtualNetworkLinksClient.CreateOrUpdate(ctx, az.ResourceGroup, PrivateDNSZoneName, vNetLinkName, parameters, false); err != nil {
-		return err
+			RegistrationEnabled: to.BoolPtr(true)},
 	}
-	return nil
+	return az.virtualNetworkLinksClient.CreateOrUpdate(ctx, vnetResourceGroup, PrivateDNSZoneName, vNetLinkName, parameters, false)
 }
 
-func (az *Cloud) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGroupName string, privateEndpointName string) error {
-	klog.V(2).Infof("Creating private DNS zone group(%s) with privateEndpoint(%s), vNetName(%s), resourceGroup(%s)", dnsZoneGroupName, privateEndpointName, az.VnetName, az.ResourceGroup)
-	privateDNSZoneID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/privateDnsZones/%s", az.SubscriptionID, az.ResourceGroup, PrivateDNSZoneName)
+func (az *Cloud) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGroupName, privateEndpointName, vnetResourceGroup string) error {
+	klog.V(2).Infof("Creating private DNS zone group(%s) with privateEndpoint(%s), vNetName(%s), resourceGroup(%s)", dnsZoneGroupName, privateEndpointName, az.VnetName, vnetResourceGroup)
+	privateDNSZoneID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/privateDnsZones/%s", az.SubscriptionID, vnetResourceGroup, PrivateDNSZoneName)
 	dnsZoneName := PrivateDNSZoneName
 	privateDNSZoneConfig := network.PrivateDNSZoneConfig{
 		Name: &dnsZoneName,
@@ -352,10 +353,7 @@ func (az *Cloud) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGroupName
 		PrivateDNSZoneConfigs: &privateDNSZoneConfigs,
 		},
 	}
-	if err := az.privatednszonegroupclient.CreateOrUpdate(ctx, az.ResourceGroup, privateEndpointName, dnsZoneGroupName, privateDNSZoneGroup, false); err != nil {
-		return err
-	}
-	return nil
+	return az.privatednszonegroupclient.CreateOrUpdate(ctx, vnetResourceGroup, privateEndpointName, dnsZoneGroupName, privateDNSZoneGroup, false)
 }
 
 // AddStorageAccountTags add tags to storage account
@@ -480,8 +478,11 @@ func isEnableNfsV3PropertyEqual(account storage.Account, accountOptions *Account
 }
 
 func isPrivateEndpointAsExpected(account storage.Account, accountOptions *AccountOptions) bool {
-	if accountOptions.CreatePrivateEndpoint && (account.PrivateEndpointConnections == nil || len(*account.PrivateEndpointConnections) == 0) {
-		return false
+	if accountOptions.CreatePrivateEndpoint && account.PrivateEndpointConnections != nil && len(*account.PrivateEndpointConnections) > 0 {
+		return true
 	}
-	return true
+	if !accountOptions.CreatePrivateEndpoint && (account.PrivateEndpointConnections == nil || len(*account.PrivateEndpointConnections) == 0) {
+		return true
+	}
+	return false
 }
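The rewritten isPrivateEndpointAsExpected above now rejects both mismatch directions: an endpoint present when none was requested, and an endpoint missing when one was requested. The old version only failed the second case. The same truth table, restated with simplified stand-in types (not the vendored storage.Account/AccountOptions):

```go
package main

import "fmt"

// Simplified stand-ins: only the fields the check reads are modeled here.
type account struct{ privateEndpointConnections []string }
type options struct{ createPrivateEndpoint bool }

// The rewritten check reduces to: the account matches only when both sides
// agree — endpoint requested and present, or neither requested nor present.
func isPrivateEndpointAsExpected(a account, o options) bool {
	hasEndpoint := len(a.privateEndpointConnections) > 0
	return o.createPrivateEndpoint == hasEndpoint
}

func main() {
	fmt.Println(isPrivateEndpointAsExpected(account{[]string{"pe1"}}, options{true}))  // true
	fmt.Println(isPrivateEndpointAsExpected(account{nil}, options{false}))             // true
	fmt.Println(isPrivateEndpointAsExpected(account{[]string{"pe1"}}, options{false})) // false: unexpected endpoint
	fmt.Println(isPrivateEndpointAsExpected(account{nil}, options{true}))              // false: endpoint missing
}
```

This matters for account reuse in EnsureStorageAccount: an existing account with an unwanted private endpoint no longer passes the match and gets silently reused.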
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
index 5fa635bb54..75ee548b37 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
@@ -17,8 +17,11 @@ limitations under the License.
 package provider
 
 import (
+	"context"
+
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
+	"github.com/Azure/go-autorest/autorest/azure"
 	v1 "k8s.io/api/core/v1"
 
 	"k8s.io/apimachinery/pkg/types"
@@ -30,7 +33,7 @@ import (
 // VMSet defines functions all vmsets (including scale set and availability
 // set) should be implemented.
 // Don't forget to run the following command to generate the mock client:
-// mockgen -source=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go -package=provider VMSet > $GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go
+// mockgen -source=$GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go -package=provider VMSet > $GOPATH/src/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets_test.go
 type VMSet interface {
 	// GetInstanceIDByNodeName gets the cloud provider ID by node name.
 	// It must return ("", cloudprovider.InstanceNotFound) if the instance does
@@ -68,9 +71,12 @@ type VMSet interface {
 	EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error
 
 	// AttachDisk attaches a disk to vm
-	AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error
+	AttachDisk(nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error)
 	// DetachDisk detaches a disk from vm
-	DetachDisk(nodeName types.NodeName, diskMap map[string]string) error
+	DetachDisk(nodeName types.NodeName, diskMap map[string]string) (*azure.Future, error)
+	// WaitForUpdateResult waits for the response of the update request
+	WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) error
+
 	// GetDataDisks gets a list of data disks attached to the node.
 	GetDataDisks(nodeName types.NodeName, string azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error)
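With the azure_vmsets.go change above, AttachDisk/DetachDisk only issue the ARM request and hand back an *azure.Future; completion is awaited separately via WaitForUpdateResult. A sketch of the resulting two-phase call pattern, with simplified stand-ins for the SDK future and the VMSet interface (the names here are illustrative, not the vendored implementation):

```go
package main

import (
	"context"
	"fmt"
)

// future stands in for *azure.Future from go-autorest; the real methods
// return the SDK type so callers can poll the pending ARM operation.
type future struct{ operation string }

// vmSet mirrors, in simplified form, the reshaped disk methods: attach
// issues the request and returns a future; waiting happens separately.
type vmSet interface {
	AttachDisk(node string, disks map[string]string) (*future, error)
	WaitForUpdateResult(ctx context.Context, f *future, resourceGroup, source string) error
}

type fakeVMSet struct{}

func (fakeVMSet) AttachDisk(node string, disks map[string]string) (*future, error) {
	return &future{operation: "attach:" + node}, nil // request accepted, not yet complete
}

func (fakeVMSet) WaitForUpdateResult(ctx context.Context, f *future, rg, source string) error {
	fmt.Printf("waiting on %s (rg=%s, source=%s)\n", f.operation, rg, source)
	return nil
}

// attachAndWait shows the two-phase sequence the interface change enables:
// issue the attach, then block on the result, leaving room for batching
// and the new attach/detach rate limiting between the two steps.
func attachAndWait(ctx context.Context, vs vmSet, node string, disks map[string]string) error {
	f, err := vs.AttachDisk(node, disks)
	if err != nil {
		return err
	}
	return vs.WaitForUpdateResult(ctx, f, "example-rg", "attach_disk")
}

func main() {
	_ = attachAndWait(context.Background(), fakeVMSet{}, "node-0", map[string]string{"diskURI": "lun0"})
}
```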
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go
index 30358cebec..6cd3e70015 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go
@@ -168,15 +168,19 @@ func doBackoffRetry(s autorest.Sender, r *http.Request, backoff *Backoff) (resp
 		// 3) request has been throttled
 		// 4) request contains non-retriable errors
 		// 5) request has completed all the retry steps
-		if rerr == nil || !rerr.Retriable || rerr.IsThrottled() || backoff.isNonRetriableError(rerr) || backoff.Steps == 1 {
-			return resp, rerr.Error()
+		if rerr == nil {
+			return resp, nil
+		}
+
+		if !rerr.Retriable || rerr.IsThrottled() || backoff.isNonRetriableError(rerr) || backoff.Steps == 1 {
+			return resp, rerr.RawError
 		}
 
 		if !delayForBackOff(backoff, r.Context().Done()) {
 			if r.Context().Err() != nil {
 				return resp, r.Context().Err()
 			}
-			return resp, rerr.Error()
+			return resp, rerr.RawError
 		}
 
 		klog.V(3).Infof("Backoff retrying %s %q with error %v", r.Method, r.URL.String(), rerr)
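The azure_retry.go hunk above splits the old combined condition so the success case (rerr == nil) returns early and never touches rerr, and terminal failures now surface rerr.RawError (the underlying ARM error) rather than the wrapped message from Error(). A simplified sketch of the new flow, using a stand-in for retry.Error rather than the vendored type:

```go
package main

import (
	"errors"
	"fmt"
)

// retriableError is a simplified stand-in for retry.Error: RawError holds
// the underlying error from ARM, which callers can now inspect directly.
type retriableError struct {
	Retriable bool
	RawError  error
}

// doOnce mirrors one decision step of doBackoffRetry: handle the nil case
// first, then return the raw error for non-retriable failures.
func doOnce(rerr *retriableError) error {
	if rerr == nil {
		return nil // success: the old combined check could not return a clean nil here
	}
	if !rerr.Retriable {
		return rerr.RawError // propagate the original ARM error, not a re-wrapped one
	}
	return fmt.Errorf("retrying: %v", rerr.RawError)
}

func main() {
	fmt.Println(doOnce(nil)) // <nil>
	fmt.Println(doOnce(&retriableError{Retriable: false, RawError: errors.New("409 conflict")}))
}
```

Returning the raw error matters to callers such as the disk attach/detach path, which match on the underlying ARM error text to decide how to react.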