diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go index 06b2d5ba3f5b..ccf46f1dde76 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go @@ -82,10 +82,10 @@ func (as *AgentPool) initialize() error { ctx, cancel := getContextWithCancel() defer cancel() - template, err := as.manager.azClient.deploymentsClient.ExportTemplate(ctx, as.manager.config.ResourceGroup, as.manager.config.Deployment) + template, err := as.manager.azClient.deploymentClient.ExportTemplate(ctx, as.manager.config.ResourceGroup, as.manager.config.Deployment) if err != nil { - klog.Errorf("deploymentsClient.ExportTemplate(%s, %s) failed: %v", as.manager.config.ResourceGroup, as.manager.config.Deployment, err) - return err + klog.Errorf("deploymentClient.ExportTemplate(%s, %s) failed: %v", as.manager.config.ResourceGroup, as.manager.config.Deployment, err) + return err.Error() } as.template = template.Template.(map[string]interface{}) @@ -211,18 +211,27 @@ func (as *AgentPool) TargetSize() (int, error) { return int(size), nil } -func (as *AgentPool) getAllSucceededAndFailedDeployments() (succeededAndFailedDeployments []resources.DeploymentExtended, err error) { +func (as *AgentPool) getAllSucceededAndFailedDeployments() ([]resources.DeploymentExtended, error) { ctx, cancel := getContextWithCancel() defer cancel() - deploymentsFilter := "provisioningState eq 'Succeeded' or provisioningState eq 'Failed'" - succeededAndFailedDeployments, err = as.manager.azClient.deploymentsClient.List(ctx, as.manager.config.ResourceGroup, deploymentsFilter, nil) - if err != nil { - klog.Errorf("getAllSucceededAndFailedDeployments: failed to list succeeded or failed deployments with error: %v", err) - return nil, err + allDeployments, rerr := as.manager.azClient.deploymentClient.List(ctx, as.manager.config.ResourceGroup) + if rerr != nil { + klog.Errorf("getAllSucceededAndFailedDeployments: failed to list deployments with error: %v", rerr.Error()) + return nil, rerr.Error() + } + + result := make([]resources.DeploymentExtended, 0) + for _, deployment := range allDeployments { + if deployment.Properties == nil || deployment.Properties.ProvisioningState == nil { + continue + } + if *deployment.Properties.ProvisioningState == "Succeeded" || *deployment.Properties.ProvisioningState == "Failed" { + result = append(result, deployment) + } } - return succeededAndFailedDeployments, err + return result, rerr.Error() } // deleteOutdatedDeployments keeps the newest deployments in the resource group and delete others, @@ -258,9 +267,9 @@ func (as *AgentPool) deleteOutdatedDeployments() (err error) { errList := make([]error, 0) for _, deployment := range toBeDeleted { klog.V(4).Infof("deleteOutdatedDeployments: starts deleting outdated deployment (%s)", *deployment.Name) - _, err := as.manager.azClient.deploymentsClient.Delete(ctx, as.manager.config.ResourceGroup, *deployment.Name) - if err != nil { - errList = append(errList, err) + rerr := as.manager.azClient.deploymentClient.Delete(ctx, as.manager.config.ResourceGroup, *deployment.Name) + if rerr != nil { + errList = append(errList, rerr.Error()) } } @@ -317,22 +326,20 @@ func (as *AgentPool) IncreaseSize(delta int) error { } ctx, cancel := getContextWithCancel() defer cancel() - klog.V(3).Infof("Waiting for deploymentsClient.CreateOrUpdate(%s, %s, %v)", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - resp, err := 
as.manager.azClient.deploymentsClient.CreateOrUpdate(ctx, as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - isSuccess, realError := isSuccessHTTPResponse(resp, err) - if isSuccess { - klog.V(3).Infof("deploymentsClient.CreateOrUpdate(%s, %s, %v) success", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - - // Update cache after scale success. - as.curSize = int64(expectedSize) - as.lastRefresh = time.Now() - klog.V(6).Info("IncreaseSize: invalidating cache") - as.manager.invalidateCache() - return nil + klog.V(3).Infof("Waiting for deploymentClient.CreateOrUpdate(%s, %s, %v)", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) + rerr := as.manager.azClient.deploymentClient.CreateOrUpdate(ctx, as.manager.config.ResourceGroup, newDeploymentName, newDeployment, "") + if rerr != nil { + klog.Errorf("deploymentClient.CreateOrUpdate for deployment %q failed: %v", newDeploymentName, rerr.Error()) + return rerr.Error() } + klog.V(3).Infof("deploymentClient.CreateOrUpdate(%s, %s, %v) success", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - klog.Errorf("deploymentsClient.CreateOrUpdate for deployment %q failed: %v", newDeploymentName, realError) - return realError + // Update cache after scale success. + as.curSize = int64(expectedSize) + as.lastRefresh = time.Now() + klog.V(6).Info("IncreaseSize: invalidating cache") + as.manager.invalidateCache() + return nil } // DecreaseTargetSize decreases the target size of the node group. This function diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go index 444f2f9f235c..a242e0b7b666 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool_test.go @@ -28,6 +28,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" @@ -156,13 +157,13 @@ func TestDeleteOutdatedDeployments(t *testing.T) { for _, test := range testCases { testAS := newTestAgentPool(newTestAzureManager(t), "testAS") - testAS.manager.azClient.deploymentsClient = &DeploymentsClientMock{ + testAS.manager.azClient.deploymentClient = &DeploymentClientMock{ FakeStore: test.deployments, } err := testAS.deleteOutdatedDeployments() assert.Equal(t, test.expectedErr, err, test.desc) - existedDeployments, err := testAS.manager.azClient.deploymentsClient.List(context.Background(), "", "", to.Int32Ptr(0)) + existedDeployments, _ := testAS.manager.azClient.deploymentClient.List(context.Background(), "") existedDeploymentsNames := make(map[string]bool) for _, deployment := range existedDeployments { existedDeploymentsNames[*deployment.Name] = true @@ -185,7 +186,7 @@ func TestGetVMsFromCache(t *testing.T) { mockVMClient := mockvmclient.NewMockInterface(ctrl) testAS.manager.azClient.virtualMachinesClient = mockVMClient mockVMClient.EXPECT().List(gomock.Any(), testAS.manager.config.ResourceGroup).Return(expectedVMs, nil) - testAS.manager.config.VMType = vmTypeStandard + testAS.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(testAS.manager.azClient, refreshInterval, 
*testAS.manager.config) assert.NoError(t, err) testAS.manager.azureCache = ac @@ -204,7 +205,7 @@ func TestGetVMIndexes(t *testing.T) { mockVMClient := mockvmclient.NewMockInterface(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) - as.manager.config.VMType = vmTypeStandard + as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) as.manager.azureCache = ac @@ -244,7 +245,7 @@ func TestGetCurSize(t *testing.T) { mockVMClient := mockvmclient.NewMockInterface(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) - as.manager.config.VMType = vmTypeStandard + as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) as.manager.azureCache = ac @@ -269,7 +270,7 @@ func TestAgentPoolTargetSize(t *testing.T) { as.manager.azClient.virtualMachinesClient = mockVMClient expectedVMs := getExpectedVMs() mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) - as.manager.config.VMType = vmTypeStandard + as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) as.manager.azureCache = ac @@ -289,7 +290,7 @@ func TestAgentPoolIncreaseSize(t *testing.T) { as.manager.azClient.virtualMachinesClient = mockVMClient expectedVMs := getExpectedVMs() mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil).MaxTimes(2) - as.manager.config.VMType = vmTypeStandard + as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) as.manager.azureCache = ac @@ -318,7 +319,7 @@ func TestDecreaseTargetSize(t *testing.T) { as.manager.azClient.virtualMachinesClient = mockVMClient expectedVMs := getExpectedVMs() mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil).MaxTimes(3) - as.manager.config.VMType = vmTypeStandard + as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) as.manager.azureCache = ac @@ -437,9 +438,9 @@ func TestAgentPoolDeleteNodes(t *testing.T) { mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl) as.manager.azClient.storageAccountsClient = mockSAClient mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) - as.manager.config.VMType = vmTypeStandard + as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) - as.manager.config.VMType = vmTypeVMSS + as.manager.config.VMType = providerazureconsts.VMTypeVMSS assert.NoError(t, err) as.manager.azureCache = ac @@ -505,7 +506,7 @@ func TestAgentPoolNodes(t *testing.T) { mockVMClient := mockvmclient.NewMockInterface(ctrl) as.manager.azClient.virtualMachinesClient = mockVMClient mockVMClient.EXPECT().List(gomock.Any(), as.manager.config.ResourceGroup).Return(expectedVMs, nil) - as.manager.config.VMType = vmTypeStandard + 
as.manager.config.VMType = providerazureconsts.VMTypeStandard ac, err := newAzureCache(as.manager.azClient, refreshInterval, *as.manager.config) assert.NoError(t, err) as.manager.azureCache = ac diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_cache.go index bb68567e8f40..c8334ac78771 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cache.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cache.go @@ -28,6 +28,7 @@ import ( "github.com/Azure/go-autorest/autorest/to" "github.com/Azure/skewer" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" "k8s.io/klog/v2" ) @@ -436,7 +437,7 @@ func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudpr } // cluster with vmss pool only - if vmType == vmTypeVMSS && len(vmsPoolSet) == 0 { + if vmType == providerazureconsts.VMTypeVMSS && len(vmsPoolSet) == 0 { if m.areAllScaleSetsUniform() { // Omit virtual machines not managed by vmss only in case of uniform scale set. if ok := virtualMachineRE.Match([]byte(inst.Name)); ok { @@ -447,7 +448,7 @@ func (m *azureCache) FindForInstance(instance *azureRef, vmType string) (cloudpr } } - if vmType == vmTypeStandard { + if vmType == providerazureconsts.VMTypeStandard { // Omit virtual machines with providerID not in Azure resource ID format. if ok := virtualMachineRE.Match([]byte(inst.Name)); !ok { klog.V(3).Infof("Instance %q is not in Azure resource ID format, omit it in autoscaler", instance.Name) diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cache_test.go b/cluster-autoscaler/cloudprovider/azure/azure_cache_test.go index 2b87ab938486..3fd801afbe8f 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cache_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cache_test.go @@ -20,6 +20,7 @@ import ( "testing" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" "github.com/stretchr/testify/assert" ) @@ -60,12 +61,12 @@ func TestFindForInstance(t *testing.T) { inst := azureRef{Name: "/subscriptions/sub/resourceGroups/rg/providers/foo"} ac.unownedInstances = make(map[azureRef]bool) ac.unownedInstances[inst] = true - nodeGroup, err := ac.FindForInstance(&inst, vmTypeVMSS) + nodeGroup, err := ac.FindForInstance(&inst, providerazureconsts.VMTypeVMSS) assert.Nil(t, nodeGroup) assert.NoError(t, err) ac.unownedInstances[inst] = false - nodeGroup, err = ac.FindForInstance(&inst, vmTypeStandard) + nodeGroup, err = ac.FindForInstance(&inst, providerazureconsts.VMTypeStandard) assert.Nil(t, nodeGroup) assert.NoError(t, err) assert.True(t, ac.unownedInstances[inst]) diff --git a/cluster-autoscaler/cloudprovider/azure/azure_client.go b/cluster-autoscaler/cloudprovider/azure/azure_client.go index 2bf337a4e8d4..fbc39a62ed28 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_client.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_client.go @@ -19,10 +19,6 @@ package azure import ( "context" "fmt" - "io/ioutil" - "net/http" - "os" - "time" _ "go.uber.org/mock/mockgen/model" // for go:generate @@ -35,119 +31,22 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" - 
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage" "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" klog "k8s.io/klog/v2" + "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient" + providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" ) -// DeploymentsClient defines needed functions for azure network.DeploymentsClient. -type DeploymentsClient interface { - Get(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExtended, err error) - List(ctx context.Context, resourceGroupName string, filter string, top *int32) (result []resources.DeploymentExtended, err error) - ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, err error) - CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (resp *http.Response, err error) - Delete(ctx context.Context, resourceGroupName string, deploymentName string) (resp *http.Response, err error) -} - -type azDeploymentsClient struct { - client resources.DeploymentsClient -} - -func newAzDeploymentsClient(subscriptionID, endpoint string, authorizer autorest.Authorizer) *azDeploymentsClient { - deploymentsClient := resources.NewDeploymentsClient(subscriptionID) - deploymentsClient.BaseURI = endpoint - deploymentsClient.Authorizer = authorizer - deploymentsClient.PollingDelay = 5 * time.Second - configureUserAgent(&deploymentsClient.Client) - - return &azDeploymentsClient{ - client: deploymentsClient, - } -} - -func (az *azDeploymentsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExtended, err error) { - klog.V(10).Infof("azDeploymentsClient.Get(%q,%q): start", resourceGroupName, deploymentName) - defer func() { - klog.V(10).Infof("azDeploymentsClient.Get(%q,%q): end", resourceGroupName, deploymentName) - }() - - return az.client.Get(ctx, resourceGroupName, deploymentName) -} - -func (az *azDeploymentsClient) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, err error) { - klog.V(10).Infof("azDeploymentsClient.ExportTemplate(%q,%q): start", resourceGroupName, deploymentName) - defer func() { - klog.V(10).Infof("azDeploymentsClient.ExportTemplate(%q,%q): end", resourceGroupName, deploymentName) - }() - - return az.client.ExportTemplate(ctx, resourceGroupName, deploymentName) -} - -func (az *azDeploymentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (resp *http.Response, err error) { - klog.V(10).Infof("azDeploymentsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, deploymentName) - defer func() { - 
klog.V(10).Infof("azDeploymentsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, deploymentName) - }() - - future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, deploymentName, parameters) - if err != nil { - return future.Response(), err - } - - err = future.WaitForCompletionRef(ctx, az.client.Client) - return future.Response(), err -} - -func (az *azDeploymentsClient) List(ctx context.Context, resourceGroupName, filter string, top *int32) (result []resources.DeploymentExtended, err error) { - klog.V(10).Infof("azDeploymentsClient.List(%q): start", resourceGroupName) - defer func() { - klog.V(10).Infof("azDeploymentsClient.List(%q): end", resourceGroupName) - }() - - iterator, err := az.client.ListByResourceGroupComplete(ctx, resourceGroupName, filter, top) - if err != nil { - return nil, err - } - - result = make([]resources.DeploymentExtended, 0) - for ; iterator.NotDone(); err = iterator.Next() { - if err != nil { - return nil, err - } - - result = append(result, iterator.Value()) - } - - return result, err -} - -func (az *azDeploymentsClient) Delete(ctx context.Context, resourceGroupName, deploymentName string) (resp *http.Response, err error) { - klog.V(10).Infof("azDeploymentsClient.Delete(%q,%q): start", resourceGroupName, deploymentName) - defer func() { - klog.V(10).Infof("azDeploymentsClient.Delete(%q,%q): end", resourceGroupName, deploymentName) - }() - - future, err := az.client.Delete(ctx, resourceGroupName, deploymentName) - if err != nil { - return future.Response(), err - } - - err = future.WaitForCompletionRef(ctx, az.client.Client) - return future.Response(), err -} - //go:generate sh -c "mockgen k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure AgentPoolsClient >./agentpool_client.go" // AgentPoolsClient interface defines the methods needed for scaling vms pool. @@ -260,15 +159,11 @@ func newAgentpoolClientWithPublicEndpoint(cfg *Config, retryOptions azurecore_po return newAgentpoolClientWithConfig(cfg.SubscriptionID, cred, env.ResourceManagerEndpoint, env.TokenAudience, retryOptions) } -type azAccountsClient struct { - client storage.AccountsClient -} - type azClient struct { virtualMachineScaleSetsClient vmssclient.Interface virtualMachineScaleSetVMsClient vmssvmclient.Interface virtualMachinesClient vmclient.Interface - deploymentsClient DeploymentsClient + deploymentClient deploymentclient.Interface interfacesClient interfaceclient.Interface disksClient diskclient.Interface storageAccountsClient storageaccountclient.Interface @@ -276,80 +171,12 @@ type azClient struct { agentPoolClient AgentPoolsClient } -// newServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the -// passed credentials map. 
-func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) - if err != nil { - return nil, fmt.Errorf("creating the OAuth config: %v", err) - } - - if config.UseWorkloadIdentityExtension { - klog.V(2).Infoln("azure: using workload identity extension to retrieve access token") - jwt, err := os.ReadFile(config.AADFederatedTokenFile) - if err != nil { - return nil, fmt.Errorf("failed to read a file with a federated token: %v", err) - } - token, err := adal.NewServicePrincipalTokenFromFederatedToken(*oauthConfig, config.AADClientID, string(jwt), env.ResourceManagerEndpoint) - if err != nil { - return nil, fmt.Errorf("failed to create a workload identity token: %v", err) - } - return token, nil - } - if config.UseManagedIdentityExtension { - klog.V(2).Infoln("azure: using managed identity extension to retrieve access token") - msiEndpoint, err := adal.GetMSIVMEndpoint() - if err != nil { - return nil, fmt.Errorf("getting the managed service identity endpoint: %v", err) - } - if config.UserAssignedIdentityID != "" { - klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token") - return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, - env.ServiceManagementEndpoint, - config.UserAssignedIdentityID) - } - klog.V(4).Info("azure: using System Assigned MSI to retrieve access token") - return adal.NewServicePrincipalTokenFromMSI( - msiEndpoint, - env.ServiceManagementEndpoint) - } - - if config.AADClientSecret != "" { - klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") - return adal.NewServicePrincipalToken( - *oauthConfig, - config.AADClientID, - config.AADClientSecret, - env.ServiceManagementEndpoint) - } - - if config.AADClientCertPath != "" { - klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") - certData, err := ioutil.ReadFile(config.AADClientCertPath) - if err != nil { - return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) - } - certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) - if err != nil { - return nil, fmt.Errorf("decoding the client certificate: %v", err) - } - return adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - config.AADClientID, - certificate, - privateKey, - env.ServiceManagementEndpoint) - } - - return nil, fmt.Errorf("no credentials provided for AAD application %s", config.AADClientID) -} - func newAuthorizer(config *Config, env *azure.Environment) (autorest.Authorizer, error) { switch config.AuthMethod { case authMethodCLI: return auth.NewAuthorizerFromCLI() case "", authMethodPrincipal: - token, err := newServicePrincipalTokenFromCredentials(config, env) + token, err := providerazureconfig.GetServicePrincipalToken(&config.AzureAuthConfig, env, "") if err != nil { return nil, fmt.Errorf("retrieve service principal token: %v", err) } @@ -380,8 +207,9 @@ func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) { virtualMachinesClient := vmclient.New(vmClientConfig) klog.V(5).Infof("Created vm client with authorizer: %v", virtualMachinesClient) - deploymentsClient := newAzDeploymentsClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, authorizer) - klog.V(5).Infof("Created deployments client with authorizer: %v", deploymentsClient) + deploymentConfig := 
azClientConfig.WithRateLimiter(cfg.DeploymentRateLimit) + deploymentClient := deploymentclient.New(deploymentConfig) + klog.V(5).Infof("Created deployments client with authorizer: %v", deploymentClient) interfaceClientConfig := azClientConfig.WithRateLimiter(cfg.InterfaceRateLimit) interfacesClient := interfaceclient.New(interfaceClientConfig) @@ -414,7 +242,7 @@ func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) { interfacesClient: interfacesClient, virtualMachineScaleSetsClient: scaleSetsClient, virtualMachineScaleSetVMsClient: scaleSetVMsClient, - deploymentsClient: deploymentsClient, + deploymentClient: deploymentClient, virtualMachinesClient: virtualMachinesClient, storageAccountsClient: storageAccountsClient, skuClient: skuClient, diff --git a/cluster-autoscaler/cloudprovider/azure/azure_client_test.go b/cluster-autoscaler/cloudprovider/azure/azure_client_test.go index 7ed0dd4c01f7..a1e94d9a5316 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_client_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_client_test.go @@ -23,17 +23,28 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/stretchr/testify/assert" + azclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient" + providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider" + providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" ) func TestGetServicePrincipalTokenFromCertificate(t *testing.T) { config := &Config{ - TenantID: "TenantID", - AADClientID: "AADClientID", - AADClientCertPath: "./testdata/test.pfx", - AADClientCertPassword: "id", + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + TenantID: "TenantID", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "AADClientID", + AADClientCertPath: "./testdata/test.pfx", + AADClientCertPassword: "id", + }, + }, + }, } env := &azure.PublicCloud - token, err := newServicePrincipalTokenFromCredentials(config, env) + token, err := providerazureconfig.GetServicePrincipalToken(&config.AzureAuthConfig, env, "") assert.NoError(t, err) oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) @@ -45,17 +56,25 @@ func TestGetServicePrincipalTokenFromCertificate(t *testing.T) { spt, err := adal.NewServicePrincipalTokenFromCertificate( *oauthConfig, config.AADClientID, certificate, privateKey, env.ServiceManagementEndpoint) assert.NoError(t, err) - assert.Equal(t, token, spt) + assert.Equal(t, token.Token(), spt.Token()) } func TestGetServicePrincipalTokenFromCertificateWithoutPassword(t *testing.T) { config := &Config{ - TenantID: "TenantID", - AADClientID: "AADClientID", - AADClientCertPath: "./testdata/testnopassword.pfx", + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + TenantID: "TenantID", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "AADClientID", + AADClientCertPath: "./testdata/testnopassword.pfx", + }, + }, + }, } env := &azure.PublicCloud - token, err := newServicePrincipalTokenFromCredentials(config, env) + token, err := providerazureconfig.GetServicePrincipalToken(&config.AzureAuthConfig, env, "") assert.NoError(t, err) oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) @@ -67,5 +86,5 @@ func TestGetServicePrincipalTokenFromCertificateWithoutPassword(t *testing.T) { spt, err 
:= adal.NewServicePrincipalTokenFromCertificate( *oauthConfig, config.AADClientID, certificate, privateKey, env.ServiceManagementEndpoint) assert.NoError(t, err) - assert.Equal(t, token, spt) + assert.Equal(t, token.Token(), spt.Token()) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go index da37ed8492da..cd88602da479 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go @@ -31,6 +31,8 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" + providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider" "github.com/Azure/go-autorest/autorest/azure" "github.com/stretchr/testify/assert" @@ -55,18 +57,20 @@ func newTestAzureManager(t *testing.T) *AzureManager { env: azure.PublicCloud, explicitlyConfigured: make(map[string]bool), config: &Config{ - ResourceGroup: "rg", - VMType: vmTypeVMSS, + Config: providerazure.Config{ + ResourceGroup: "rg", + VMType: providerazureconsts.VMTypeVMSS, + Location: "eastus", + }, MaxDeploymentsCount: 2, Deployment: "deployment", EnableForceDelete: true, - Location: "eastus", }, azClient: &azClient{ virtualMachineScaleSetsClient: mockVMSSClient, virtualMachineScaleSetVMsClient: mockVMSSVMClient, virtualMachinesClient: mockVMClient, - deploymentsClient: &DeploymentsClientMock{ + deploymentClient: &DeploymentClientMock{ FakeStore: map[string]resources.DeploymentExtended{ "deployment": { Name: to.StringPtr("deployment"), @@ -332,7 +336,7 @@ func TestNodeGroupForNode(t *testing.T) { mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - provider.azureManager.config.EnableVmssFlex = true + provider.azureManager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_config.go b/cluster-autoscaler/cloudprovider/azure/azure_config.go index 6c354c2a23e4..49d0bdf32d4d 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_config.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_config.go @@ -18,7 +18,6 @@ package azure import ( "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -32,7 +31,9 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "k8s.io/klog/v2" azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider" + providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -42,60 +43,23 @@ const ( imdsServerURL = "http://169.254.169.254" - // backoff - backoffRetriesDefault = 6 - backoffExponentDefault = 1.5 - backoffDurationDefault = 5 // in seconds - backoffJitterDefault = 1.0 - - // rate limit - rateLimitQPSDefault float32 = 1.0 - rateLimitBucketDefault = 5 - rateLimitReadQPSEnvVar = "RATE_LIMIT_READ_QPS" - rateLimitReadBucketsEnvVar = 
"RATE_LIMIT_READ_BUCKETS" - rateLimitWriteQPSEnvVar = "RATE_LIMIT_WRITE_QPS" - rateLimitWriteBucketsEnvVar = "RATE_LIMIT_WRITE_BUCKETS" - - // VmssSizeRefreshPeriodDefault in seconds - VmssSizeRefreshPeriodDefault = 30 - // auth methods authMethodPrincipal = "principal" authMethodCLI = "cli" - - // toggle - dynamicInstanceListDefault = false - enableVmssFlexDefault = false ) -// CloudProviderRateLimitConfig indicates the rate limit config for each clients. -type CloudProviderRateLimitConfig struct { - // The default rate limit config options. - azclients.RateLimitConfig - - // Rate limit config for each clients. Values would override default settings above. - InterfaceRateLimit *azclients.RateLimitConfig `json:"interfaceRateLimit,omitempty" yaml:"interfaceRateLimit,omitempty"` - VirtualMachineRateLimit *azclients.RateLimitConfig `json:"virtualMachineRateLimit,omitempty" yaml:"virtualMachineRateLimit,omitempty"` - StorageAccountRateLimit *azclients.RateLimitConfig `json:"storageAccountRateLimit,omitempty" yaml:"storageAccountRateLimit,omitempty"` - DiskRateLimit *azclients.RateLimitConfig `json:"diskRateLimit,omitempty" yaml:"diskRateLimit,omitempty"` - VirtualMachineScaleSetRateLimit *azclients.RateLimitConfig `json:"virtualMachineScaleSetRateLimit,omitempty" yaml:"virtualMachineScaleSetRateLimit,omitempty"` - KubernetesServiceRateLimit *azclients.RateLimitConfig `json:"kubernetesServiceRateLimit,omitempty" yaml:"kubernetesServiceRateLimit,omitempty"` -} - -// Config holds the configuration parsed from the --cloud-config flag +// Config holds the configuration parsed from the --cloud-config flag or the environment variables. +// Contains both general Azure cloud provider configuration (i.e., in azure.json) and CAS configurations/options specifically for Azure provider. type Config struct { - CloudProviderRateLimitConfig - - Cloud string `json:"cloud" yaml:"cloud"` - Location string `json:"location" yaml:"location"` - TenantID string `json:"tenantId" yaml:"tenantId"` - SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"` - ClusterName string `json:"clusterName" yaml:"clusterName"` - // ResourceGroup is the MC_ resource group where the nodes are located. - ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"` + // Azure cloud provider configuration, which is generally shared with other Azure components. + providerazure.Config `json:",inline" yaml:",inline"` + + // Legacy fields, which are only here for backward compatibility. To be deprecated. + legacyConfig `json:",inline" yaml:",inline"` + + ClusterName string `json:"clusterName" yaml:"clusterName"` // ClusterResourceGroup is the resource group where the cluster is located. ClusterResourceGroup string `json:"clusterResourceGroup" yaml:"clusterResourceGroup"` - VMType string `json:"vmType" yaml:"vmType"` // ARMBaseURLForAPClient is the URL to use for operations for the VMs pool. // It can override the default public ARM endpoint for VMs pool scale operations. @@ -105,51 +69,26 @@ type Config struct { // cloud. Valid options are "principal" (= the traditional // service principle approach) and "cli" (= load az command line // config file). The default is "principal". + // 08/16/2024: This field is awkward, given the existence of UseManagedIdentityExtension and UseFederatedWorkloadIdentityExtension. + // Ideally, either it should be deprecated, or reworked to be on the same "dimension" as the two above, if not reworking those two. 
AuthMethod string `json:"authMethod" yaml:"authMethod"` - // Settings for a service principal. - - AADClientID string `json:"aadClientId" yaml:"aadClientId"` - AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"` - AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"` - AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"` - AADFederatedTokenFile string `json:"aadFederatedTokenFile" yaml:"aadFederatedTokenFile"` - UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"` - UseWorkloadIdentityExtension bool `json:"useWorkloadIdentityExtension" yaml:"useWorkloadIdentityExtension"` - UserAssignedIdentityID string `json:"userAssignedIdentityID" yaml:"userAssignedIdentityID"` - // Configs only for standard vmType (agent pools). Deployment string `json:"deployment" yaml:"deployment"` DeploymentParameters map[string]interface{} `json:"deploymentParameters" yaml:"deploymentParameters"` - // VMSS metadata cache TTL in seconds, only applies for vmss type - VmssCacheTTL int64 `json:"vmssCacheTTL" yaml:"vmssCacheTTL"` - - // VMSS instances cache TTL in seconds, only applies for vmss type - VmssVmsCacheTTL int64 `json:"vmssVmsCacheTTL" yaml:"vmssVmsCacheTTL"` - // Jitter in seconds subtracted from the VMSS cache TTL before the first refresh VmssVmsCacheJitter int `json:"vmssVmsCacheJitter" yaml:"vmssVmsCacheJitter"` // number of latest deployments that will not be deleted MaxDeploymentsCount int64 `json:"maxDeploymentsCount" yaml:"maxDeploymentsCount"` - // Enable exponential backoff to manage resource request retries - CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty" yaml:"cloudProviderBackoff,omitempty"` - CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries,omitempty" yaml:"cloudProviderBackoffRetries,omitempty"` - CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent,omitempty" yaml:"cloudProviderBackoffExponent,omitempty"` - CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration,omitempty" yaml:"cloudProviderBackoffDuration,omitempty"` - CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter,omitempty" yaml:"cloudProviderBackoffJitter,omitempty"` - // EnableForceDelete defines whether to enable force deletion on the APIs EnableForceDelete bool `json:"enableForceDelete,omitempty" yaml:"enableForceDelete,omitempty"` // EnableDynamicInstanceList defines whether to enable dynamic instance workflow for instance information check EnableDynamicInstanceList bool `json:"enableDynamicInstanceList,omitempty" yaml:"enableDynamicInstanceList,omitempty"` - // EnableVmssFlex defines whether to enable Vmss Flex support or not - EnableVmssFlex bool `json:"enableVmssFlex,omitempty" yaml:"enableVmssFlex,omitempty"` - // (DEPRECATED, DO NOT USE) EnableDetailedCSEMessage defines whether to emit error messages in the CSE error body info EnableDetailedCSEMessage bool `json:"enableDetailedCSEMessage,omitempty" yaml:"enableDetailedCSEMessage,omitempty"` @@ -157,11 +96,34 @@ type Config struct { GetVmssSizeRefreshPeriod int `json:"getVmssSizeRefreshPeriod,omitempty" yaml:"getVmssSizeRefreshPeriod,omitempty"` } +// These are only here for backward compatibility. Their equivalent exists in providerazure.Config with a different name. 
+type legacyConfig struct { + // Being renamed to UseFederatedWorkloadIdentityExtension + UseWorkloadIdentityExtension *bool `json:"useWorkloadIdentityExtension" yaml:"useWorkloadIdentityExtension"` + // VMSS metadata cache TTL in seconds, only applies for vmss type; being renamed to VmssCacheTTLInSeconds + VmssCacheTTL *int64 `json:"vmssCacheTTL" yaml:"vmssCacheTTL"` + // VMSS instances cache TTL in seconds, only applies for vmss type; being renamed to VmssVirtualMachinesCacheTTLInSeconds + VmssVmsCacheTTL *int64 `json:"vmssVmsCacheTTL" yaml:"vmssVmsCacheTTL"` + // EnableVmssFlex defines whether to enable Vmss Flex support or not; being renamed to EnableVmssFlexNodes + EnableVmssFlex *bool `json:"enableVmssFlex,omitempty" yaml:"enableVmssFlex,omitempty"` +} + // BuildAzureConfig returns a Config object for the Azure clients func BuildAzureConfig(configReader io.Reader) (*Config, error) { var err error cfg := &Config{} + // Static defaults + cfg.EnableDynamicInstanceList = false + cfg.EnableVmssFlexNodes = false + cfg.CloudProviderBackoffRetries = providerazureconsts.BackoffRetriesDefault + cfg.CloudProviderBackoffExponent = providerazureconsts.BackoffExponentDefault + cfg.CloudProviderBackoffDuration = providerazureconsts.BackoffDurationDefault + cfg.CloudProviderBackoffJitter = providerazureconsts.BackoffJitterDefault + cfg.VMType = providerazureconsts.VMTypeVMSS + cfg.MaxDeploymentsCount = int64(defaultMaxDeploymentsCount) + + // Config file overrides defaults if configReader != nil { body, err := ioutil.ReadAll(configReader) if err != nil { @@ -171,192 +133,181 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) { if err != nil { return nil, fmt.Errorf("failed to unmarshal config body: %v", err) } - } else { - cfg.Cloud = os.Getenv("ARM_CLOUD") - cfg.Location = os.Getenv("LOCATION") - cfg.ResourceGroup = os.Getenv("ARM_RESOURCE_GROUP") - cfg.TenantID = os.Getenv("ARM_TENANT_ID") - if tenantId := os.Getenv("AZURE_TENANT_ID"); tenantId != "" { - cfg.TenantID = tenantId - } - cfg.AADClientID = os.Getenv("ARM_CLIENT_ID") - if clientId := os.Getenv("AZURE_CLIENT_ID"); clientId != "" { - cfg.AADClientID = clientId - } - cfg.AADFederatedTokenFile = os.Getenv("AZURE_FEDERATED_TOKEN_FILE") - cfg.AADClientSecret = os.Getenv("ARM_CLIENT_SECRET") - cfg.VMType = strings.ToLower(os.Getenv("ARM_VM_TYPE")) - cfg.AADClientCertPath = os.Getenv("ARM_CLIENT_CERT_PATH") - cfg.AADClientCertPassword = os.Getenv("ARM_CLIENT_CERT_PASSWORD") - cfg.Deployment = os.Getenv("ARM_DEPLOYMENT") - - subscriptionID, err := getSubscriptionIdFromInstanceMetadata() - if err != nil { - return nil, err - } - cfg.SubscriptionID = subscriptionID - - useManagedIdentityExtensionFromEnv := os.Getenv("ARM_USE_MANAGED_IDENTITY_EXTENSION") - if len(useManagedIdentityExtensionFromEnv) > 0 { - cfg.UseManagedIdentityExtension, err = strconv.ParseBool(useManagedIdentityExtensionFromEnv) - if err != nil { - return nil, err - } - } - - useWorkloadIdentityExtensionFromEnv := os.Getenv("ARM_USE_WORKLOAD_IDENTITY_EXTENSION") - if len(useWorkloadIdentityExtensionFromEnv) > 0 { - cfg.UseWorkloadIdentityExtension, err = strconv.ParseBool(useWorkloadIdentityExtensionFromEnv) - if err != nil { - return nil, err - } - } - - if cfg.UseManagedIdentityExtension && cfg.UseWorkloadIdentityExtension { - return nil, errors.New("you can not combine both managed identity and workload identity as an authentication mechanism") - } - - userAssignedIdentityIDFromEnv := os.Getenv("ARM_USER_ASSIGNED_IDENTITY_ID") - if userAssignedIdentityIDFromEnv 
!= "" { - cfg.UserAssignedIdentityID = userAssignedIdentityIDFromEnv - } - - if vmssCacheTTL := os.Getenv("AZURE_VMSS_CACHE_TTL"); vmssCacheTTL != "" { - cfg.VmssCacheTTL, err = strconv.ParseInt(vmssCacheTTL, 10, 0) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_VMSS_CACHE_TTL %q: %v", vmssCacheTTL, err) - } - } - - if vmssVmsCacheTTL := os.Getenv("AZURE_VMSS_VMS_CACHE_TTL"); vmssVmsCacheTTL != "" { - cfg.VmssVmsCacheTTL, err = strconv.ParseInt(vmssVmsCacheTTL, 10, 0) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_VMSS_VMS_CACHE_TTL %q: %v", vmssVmsCacheTTL, err) - } - } - - if vmssVmsCacheJitter := os.Getenv("AZURE_VMSS_VMS_CACHE_JITTER"); vmssVmsCacheJitter != "" { - cfg.VmssVmsCacheJitter, err = strconv.Atoi(vmssVmsCacheJitter) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_VMSS_VMS_CACHE_JITTER %q: %v", vmssVmsCacheJitter, err) - } - } + } - if threshold := os.Getenv("AZURE_MAX_DEPLOYMENT_COUNT"); threshold != "" { - cfg.MaxDeploymentsCount, err = strconv.ParseInt(threshold, 10, 0) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_MAX_DEPLOYMENT_COUNT %q: %v", threshold, err) - } + // Legacy config fields, take precedence if provided. + if cfg.UseWorkloadIdentityExtension != nil { + cfg.UseFederatedWorkloadIdentityExtension = *cfg.UseWorkloadIdentityExtension + } + if cfg.VmssCacheTTL != nil { + if *cfg.VmssCacheTTL > int64(^uint32(0)) { + return nil, fmt.Errorf("VmssCacheTTL value %d is too large", *cfg.VmssCacheTTL) } - - if enableBackoff := os.Getenv("ENABLE_BACKOFF"); enableBackoff != "" { - cfg.CloudProviderBackoff, err = strconv.ParseBool(enableBackoff) - if err != nil { - return nil, fmt.Errorf("failed to parse ENABLE_BACKOFF %q: %v", enableBackoff, err) - } + cfg.VmssCacheTTLInSeconds = int(*cfg.VmssCacheTTL) + } + if cfg.VmssVmsCacheTTL != nil { + if *cfg.VmssVmsCacheTTL > int64(^uint32(0)) { + return nil, fmt.Errorf("VmssVmsCacheTTL value %d is too large", *cfg.VmssVmsCacheTTL) } + cfg.VmssVirtualMachinesCacheTTLInSeconds = int(*cfg.VmssVmsCacheTTL) + } + if cfg.EnableVmssFlex != nil { + cfg.EnableVmssFlexNodes = *cfg.EnableVmssFlex + } - if enableDynamicInstanceList := os.Getenv("AZURE_ENABLE_DYNAMIC_INSTANCE_LIST"); enableDynamicInstanceList != "" { - cfg.EnableDynamicInstanceList, err = strconv.ParseBool(enableDynamicInstanceList) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_ENABLE_DYNAMIC_INSTANCE_LIST %q: %v", enableDynamicInstanceList, err) - } - } else { - cfg.EnableDynamicInstanceList = dynamicInstanceListDefault + // Each of these environment variables, if provided, will override what's in the config file. + // Note that this "retrieval from env" does not exist in cloud-provider-azure library (at the time of this comment). 
+ if _, err = assignFromEnvIfExists(&cfg.ClusterName, "CLUSTER_NAME"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.ClusterResourceGroup, "ARM_CLUSTER_RESOURCE_GROUP"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.ARMBaseURLForAPClient, "ARM_BASE_URL_FOR_AP_CLIENT"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.Cloud, "ARM_CLOUD"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.Location, "LOCATION"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.ResourceGroup, "ARM_RESOURCE_GROUP"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.TenantID, "ARM_TENANT_ID"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.TenantID, "AZURE_TENANT_ID"); err != nil { // taking precedence + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.AADClientID, "ARM_CLIENT_ID"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.AADClientID, "AZURE_CLIENT_ID"); err != nil { // taking precedence + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.AADFederatedTokenFile, "AZURE_FEDERATED_TOKEN_FILE"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.AADClientSecret, "ARM_CLIENT_SECRET"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.VMType, "ARM_VM_TYPE"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.AADClientCertPath, "ARM_CLIENT_CERT_PATH"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.AADClientCertPassword, "ARM_CLIENT_CERT_PASSWORD"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.Deployment, "ARM_DEPLOYMENT"); err != nil { + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.SubscriptionID, "ARM_SUBSCRIPTION_ID"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.UseManagedIdentityExtension, "ARM_USE_MANAGED_IDENTITY_EXTENSION"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.UseFederatedWorkloadIdentityExtension, "ARM_USE_FEDERATED_WORKLOAD_IDENTITY_EXTENSION"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.UseFederatedWorkloadIdentityExtension, "ARM_USE_WORKLOAD_IDENTITY_EXTENSION"); err != nil { // taking precedence + return nil, err + } + if _, err = assignFromEnvIfExists(&cfg.UserAssignedIdentityID, "ARM_USER_ASSIGNED_IDENTITY_ID"); err != nil { + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.VmssCacheTTLInSeconds, "AZURE_VMSS_CACHE_TTL_IN_SECONDS"); err != nil { + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.VmssCacheTTLInSeconds, "AZURE_VMSS_CACHE_TTL"); err != nil { // taking precedence + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.VmssVirtualMachinesCacheTTLInSeconds, "AZURE_VMSS_VMS_CACHE_TTL_IN_SECONDS"); err != nil { + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.VmssVirtualMachinesCacheTTLInSeconds, "AZURE_VMSS_VMS_CACHE_TTL"); err != nil { // taking precedence + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.VmssVmsCacheJitter, "AZURE_VMSS_VMS_CACHE_JITTER"); err != nil { + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.GetVmssSizeRefreshPeriod, "AZURE_GET_VMSS_SIZE_REFRESH_PERIOD"); err != nil { + return nil, err + } + if _, err = assignInt64FromEnvIfExists(&cfg.MaxDeploymentsCount, 
"AZURE_MAX_DEPLOYMENT_COUNT"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.CloudProviderBackoff, "ENABLE_BACKOFF"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.EnableForceDelete, "AZURE_ENABLE_FORCE_DELETE"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.EnableDynamicInstanceList, "AZURE_ENABLE_DYNAMIC_INSTANCE_LIST"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.EnableVmssFlexNodes, "AZURE_ENABLE_VMSS_FLEX_NODES"); err != nil { + return nil, err + } + if _, err = assignBoolFromEnvIfExists(&cfg.EnableVmssFlexNodes, "AZURE_ENABLE_VMSS_FLEX"); err != nil { // taking precedence + return nil, err + } + if cfg.CloudProviderBackoff { + if _, err = assignIntFromEnvIfExists(&cfg.CloudProviderBackoffRetries, "BACKOFF_RETRIES"); err != nil { + return nil, err } - - if getVmssSizeRefreshPeriod := os.Getenv("AZURE_GET_VMSS_SIZE_REFRESH_PERIOD"); getVmssSizeRefreshPeriod != "" { - cfg.GetVmssSizeRefreshPeriod, err = strconv.Atoi(getVmssSizeRefreshPeriod) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_GET_VMSS_SIZE_REFRESH_PERIOD %q: %v", getVmssSizeRefreshPeriod, err) - } - } else { - cfg.GetVmssSizeRefreshPeriod = VmssSizeRefreshPeriodDefault + if _, err = assignFloat64FromEnvIfExists(&cfg.CloudProviderBackoffExponent, "BACKOFF_EXPONENT"); err != nil { + return nil, err } - - if enableVmssFlex := os.Getenv("AZURE_ENABLE_VMSS_FLEX"); enableVmssFlex != "" { - cfg.EnableVmssFlex, err = strconv.ParseBool(enableVmssFlex) - if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_ENABLE_VMSS_FLEX %q: %v", enableVmssFlex, err) - } - } else { - cfg.EnableVmssFlex = enableVmssFlexDefault + if _, err = assignIntFromEnvIfExists(&cfg.CloudProviderBackoffDuration, "BACKOFF_DURATION"); err != nil { + return nil, err } - - if cfg.CloudProviderBackoff { - if backoffRetries := os.Getenv("BACKOFF_RETRIES"); backoffRetries != "" { - retries, err := strconv.ParseInt(backoffRetries, 10, 0) - if err != nil { - return nil, fmt.Errorf("failed to parse BACKOFF_RETRIES %q: %v", retries, err) - } - cfg.CloudProviderBackoffRetries = int(retries) - } else { - cfg.CloudProviderBackoffRetries = backoffRetriesDefault - } - - if backoffExponent := os.Getenv("BACKOFF_EXPONENT"); backoffExponent != "" { - cfg.CloudProviderBackoffExponent, err = strconv.ParseFloat(backoffExponent, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse BACKOFF_EXPONENT %q: %v", backoffExponent, err) - } - } else { - cfg.CloudProviderBackoffExponent = backoffExponentDefault - } - - if backoffDuration := os.Getenv("BACKOFF_DURATION"); backoffDuration != "" { - duration, err := strconv.ParseInt(backoffDuration, 10, 0) - if err != nil { - return nil, fmt.Errorf("failed to parse BACKOFF_DURATION %q: %v", backoffDuration, err) - } - cfg.CloudProviderBackoffDuration = int(duration) - } else { - cfg.CloudProviderBackoffDuration = backoffDurationDefault - } - - if backoffJitter := os.Getenv("BACKOFF_JITTER"); backoffJitter != "" { - cfg.CloudProviderBackoffJitter, err = strconv.ParseFloat(backoffJitter, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse BACKOFF_JITTER %q: %v", backoffJitter, err) - } - } else { - cfg.CloudProviderBackoffJitter = backoffJitterDefault - } + if _, err = assignFloat64FromEnvIfExists(&cfg.CloudProviderBackoffJitter, "BACKOFF_JITTER"); err != nil { + return nil, err } } + if _, err = assignBoolFromEnvIfExists(&cfg.CloudProviderRateLimit, 
"CLOUD_PROVIDER_RATE_LIMIT"); err != nil { + return nil, err + } + if _, err = assignFloat32FromEnvIfExists(&cfg.CloudProviderRateLimitQPS, "RATE_LIMIT_READ_QPS"); err != nil { + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.CloudProviderRateLimitBucket, "RATE_LIMIT_READ_BUCKETS"); err != nil { + return nil, err + } + if _, err = assignFloat32FromEnvIfExists(&cfg.CloudProviderRateLimitQPSWrite, "RATE_LIMIT_WRITE_QPS"); err != nil { + return nil, err + } + if _, err = assignIntFromEnvIfExists(&cfg.CloudProviderRateLimitBucketWrite, "RATE_LIMIT_WRITE_BUCKETS"); err != nil { + return nil, err + } - // always read the following from environment variables since azure.json doesn't have these fields - cfg.ClusterName = os.Getenv("CLUSTER_NAME") - cfg.ClusterResourceGroup = os.Getenv("ARM_CLUSTER_RESOURCE_GROUP") - cfg.ARMBaseURLForAPClient = os.Getenv("ARM_BASE_URL_FOR_AP_CLIENT") - - cfg.TrimSpace() - - if cloudProviderRateLimit := os.Getenv("CLOUD_PROVIDER_RATE_LIMIT"); cloudProviderRateLimit != "" { - cfg.CloudProviderRateLimit, err = strconv.ParseBool(cloudProviderRateLimit) + // Nonstatic defaults + cfg.VMType = strings.ToLower(cfg.VMType) + if cfg.MaxDeploymentsCount == 0 { + // 0 means "use default" in this case. + // This means, if it is valued by the config file, but explicitly set to 0 in the env, it will retreat to default. + cfg.MaxDeploymentsCount = int64(defaultMaxDeploymentsCount) + } + if cfg.SubscriptionID == "" { + metadataService, err := providerazure.NewInstanceMetadataService(imdsServerURL) if err != nil { - return nil, fmt.Errorf("failed to parse CLOUD_PROVIDER_RATE_LIMIT: %q, %v", cloudProviderRateLimit, err) + return nil, err } - } - if enableForceDelete := os.Getenv("AZURE_ENABLE_FORCE_DELETE"); enableForceDelete != "" { - cfg.EnableForceDelete, err = strconv.ParseBool(enableForceDelete) + metadata, err := metadataService.GetMetadata(0) if err != nil { - return nil, fmt.Errorf("failed to parse AZURE_ENABLE_FORCE_DELETE: %q, %v", enableForceDelete, err) + return nil, err } - } - err = initializeCloudProviderRateLimitConfig(&cfg.CloudProviderRateLimitConfig) - if err != nil { - return nil, err + cfg.SubscriptionID = metadata.Compute.SubscriptionID } - - // Defaulting vmType to vmss. - if cfg.VMType == "" { - cfg.VMType = vmTypeVMSS - } - - // Read parameters from deploymentParametersPath if it is not set. - if cfg.VMType == vmTypeStandard && len(cfg.DeploymentParameters) == 0 { + if cfg.VMType == providerazureconsts.VMTypeStandard && len(cfg.DeploymentParameters) == 0 { + // Read parameters from deploymentParametersPath if it is not set. parameters, err := readDeploymentParameters(deploymentParametersPath) if err != nil { klog.Errorf("readDeploymentParameters failed with error: %v", err) @@ -365,10 +316,7 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) { cfg.DeploymentParameters = parameters } - - if cfg.MaxDeploymentsCount == 0 { - cfg.MaxDeploymentsCount = int64(defaultMaxDeploymentsCount) - } + providerazureconfig.InitializeCloudProviderRateLimitConfig(&cfg.CloudProviderRateLimitConfig) if err := cfg.validate(); err != nil { return nil, err @@ -376,104 +324,11 @@ func BuildAzureConfig(configReader io.Reader) (*Config, error) { return cfg, nil } -// initializeCloudProviderRateLimitConfig initializes rate limit configs. -func initializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig) error { - if config == nil { - return nil - } - - // Assign read rate limit defaults if no configuration was passed in. 
- if config.CloudProviderRateLimitQPS == 0 { - if rateLimitQPSFromEnv := os.Getenv(rateLimitReadQPSEnvVar); rateLimitQPSFromEnv != "" { - rateLimitQPS, err := strconv.ParseFloat(rateLimitQPSFromEnv, 0) - if err != nil { - return fmt.Errorf("failed to parse %s: %q, %v", rateLimitReadQPSEnvVar, rateLimitQPSFromEnv, err) - } - config.CloudProviderRateLimitQPS = float32(rateLimitQPS) - } else { - config.CloudProviderRateLimitQPS = rateLimitQPSDefault - } - } - - if config.CloudProviderRateLimitBucket == 0 { - if rateLimitBucketFromEnv := os.Getenv(rateLimitReadBucketsEnvVar); rateLimitBucketFromEnv != "" { - rateLimitBucket, err := strconv.ParseInt(rateLimitBucketFromEnv, 10, 0) - if err != nil { - return fmt.Errorf("failed to parse %s: %q, %v", rateLimitReadBucketsEnvVar, rateLimitBucketFromEnv, err) - } - config.CloudProviderRateLimitBucket = int(rateLimitBucket) - } else { - config.CloudProviderRateLimitBucket = rateLimitBucketDefault - } - } - - // Assign write rate limit defaults if no configuration was passed in. - if config.CloudProviderRateLimitQPSWrite == 0 { - if rateLimitQPSWriteFromEnv := os.Getenv(rateLimitWriteQPSEnvVar); rateLimitQPSWriteFromEnv != "" { - rateLimitQPSWrite, err := strconv.ParseFloat(rateLimitQPSWriteFromEnv, 0) - if err != nil { - return fmt.Errorf("failed to parse %s: %q, %v", rateLimitWriteQPSEnvVar, rateLimitQPSWriteFromEnv, err) - } - config.CloudProviderRateLimitQPSWrite = float32(rateLimitQPSWrite) - } else { - config.CloudProviderRateLimitQPSWrite = config.CloudProviderRateLimitQPS - } - } - - if config.CloudProviderRateLimitBucketWrite == 0 { - if rateLimitBucketWriteFromEnv := os.Getenv(rateLimitWriteBucketsEnvVar); rateLimitBucketWriteFromEnv != "" { - rateLimitBucketWrite, err := strconv.ParseInt(rateLimitBucketWriteFromEnv, 10, 0) - if err != nil { - return fmt.Errorf("failed to parse %s: %q, %v", rateLimitWriteBucketsEnvVar, rateLimitBucketWriteFromEnv, err) - } - config.CloudProviderRateLimitBucketWrite = int(rateLimitBucketWrite) - } else { - config.CloudProviderRateLimitBucketWrite = config.CloudProviderRateLimitBucket - } - } - - config.InterfaceRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.InterfaceRateLimit) - config.VirtualMachineRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineRateLimit) - config.StorageAccountRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.StorageAccountRateLimit) - config.DiskRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.DiskRateLimit) - config.VirtualMachineScaleSetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineScaleSetRateLimit) - config.KubernetesServiceRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.KubernetesServiceRateLimit) - - return nil -} - -// overrideDefaultRateLimitConfig overrides the default CloudProviderRateLimitConfig. -func overrideDefaultRateLimitConfig(defaults, config *azclients.RateLimitConfig) *azclients.RateLimitConfig { - // If config not set, apply defaults. - if config == nil { - return defaults - } - - // Remain disabled if it's set explicitly. - if !config.CloudProviderRateLimit { - return &azclients.RateLimitConfig{CloudProviderRateLimit: false} - } - - // Apply default values. 
- if config.CloudProviderRateLimitQPS == 0 { - config.CloudProviderRateLimitQPS = defaults.CloudProviderRateLimitQPS - } - if config.CloudProviderRateLimitBucket == 0 { - config.CloudProviderRateLimitBucket = defaults.CloudProviderRateLimitBucket - } - if config.CloudProviderRateLimitQPSWrite == 0 { - config.CloudProviderRateLimitQPSWrite = defaults.CloudProviderRateLimitQPSWrite - } - if config.CloudProviderRateLimitBucketWrite == 0 { - config.CloudProviderRateLimitBucketWrite = defaults.CloudProviderRateLimitBucketWrite - } - - return config -} - +// A "fork" of az.getAzureClientConfig with BYO authorizer (e.g., for CLI auth) and custom polling delay support func (cfg *Config) getAzureClientConfig(authorizer autorest.Authorizer, env *azure.Environment) *azclients.ClientConfig { pollingDelay := 30 * time.Second azClientConfig := &azclients.ClientConfig{ + CloudName: cfg.Cloud, Location: cfg.Location, SubscriptionID: cfg.SubscriptionID, ResourceManagerEndpoint: env.ResourceManagerEndpoint, @@ -482,6 +337,8 @@ func (cfg *Config) getAzureClientConfig(authorizer autorest.Authorizer, env *azu RestClientConfig: azclients.RestClientConfig{ PollingDelay: &pollingDelay, }, + DisableAzureStackCloud: cfg.DisableAzureStackCloud, + UserAgent: cfg.UserAgent, } if cfg.CloudProviderBackoff { @@ -493,24 +350,14 @@ func (cfg *Config) getAzureClientConfig(authorizer autorest.Authorizer, env *azu } } - return azClientConfig -} + if cfg.HasExtendedLocation() { + azClientConfig.ExtendedLocation = &azclients.ExtendedLocation{ + Name: cfg.ExtendedLocationName, + Type: cfg.ExtendedLocationType, + } + } -// TrimSpace removes all leading and trailing white spaces. -func (cfg *Config) TrimSpace() { - cfg.Cloud = strings.TrimSpace(cfg.Cloud) - cfg.Location = strings.TrimSpace(cfg.Location) - cfg.TenantID = strings.TrimSpace(cfg.TenantID) - cfg.SubscriptionID = strings.TrimSpace(cfg.SubscriptionID) - cfg.ClusterName = strings.TrimSpace(cfg.ClusterName) - cfg.ResourceGroup = strings.TrimSpace(cfg.ResourceGroup) - cfg.ClusterResourceGroup = strings.TrimSpace(cfg.ClusterResourceGroup) - cfg.VMType = strings.TrimSpace(cfg.VMType) - cfg.AADClientID = strings.TrimSpace(cfg.AADClientID) - cfg.AADClientSecret = strings.TrimSpace(cfg.AADClientSecret) - cfg.AADClientCertPath = strings.TrimSpace(cfg.AADClientCertPath) - cfg.AADClientCertPassword = strings.TrimSpace(cfg.AADClientCertPassword) - cfg.Deployment = strings.TrimSpace(cfg.Deployment) + return azClientConfig } func (cfg *Config) validate() error { @@ -518,7 +365,7 @@ func (cfg *Config) validate() error { return fmt.Errorf("resource group not set") } - if cfg.VMType == vmTypeStandard { + if cfg.VMType == providerazureconsts.VMTypeStandard { if cfg.Deployment == "" { return fmt.Errorf("deployment not set") } @@ -532,23 +379,29 @@ func (cfg *Config) validate() error { return fmt.Errorf("subscription ID not set") } - if cfg.UseManagedIdentityExtension { - return nil + if cfg.UseManagedIdentityExtension && cfg.UseFederatedWorkloadIdentityExtension { + return fmt.Errorf("you can not combine both managed identity and workload identity as an authentication mechanism") } - if cfg.TenantID == "" { - return fmt.Errorf("tenant ID not set") + if cfg.VMType != providerazureconsts.VMTypeStandard && cfg.VMType != providerazureconsts.VMTypeVMSS { + return fmt.Errorf("unsupported VM type: %s", cfg.VMType) } - switch cfg.AuthMethod { - case "", authMethodPrincipal: - if cfg.AADClientID == "" { - return errors.New("ARM Client ID not set") + if !cfg.UseManagedIdentityExtension && 
!cfg.UseFederatedWorkloadIdentityExtension { + if cfg.TenantID == "" { + return fmt.Errorf("tenant ID not set") + } + + switch cfg.AuthMethod { + case "", authMethodPrincipal: + if cfg.AADClientID == "" { + return fmt.Errorf("ARM Client ID not set") + } + case authMethodCLI: + // Nothing to check at the moment. + default: + return fmt.Errorf("unsupported authorization method: %s", cfg.AuthMethod) } - case authMethodCLI: - // Nothing to check at the moment. - default: - return fmt.Errorf("unsupported authorization method: %s", cfg.AuthMethod) } if cfg.CloudProviderBackoff && cfg.CloudProviderBackoffRetries == 0 { @@ -558,21 +411,88 @@ func (cfg *Config) validate() error { return nil } -// getSubscriptionId reads the Subscription ID from the instance metadata. -func getSubscriptionIdFromInstanceMetadata() (string, error) { - subscriptionID, present := os.LookupEnv("ARM_SUBSCRIPTION_ID") - if !present { - metadataService, err := providerazure.NewInstanceMetadataService(imdsServerURL) +func assignFromEnvIfExists(assignee *string, name string) (bool, error) { + if assignee == nil { + return false, fmt.Errorf("assignee is nil") + } + if val, present := os.LookupEnv(name); present && strings.TrimSpace(val) != "" { + *assignee = strings.TrimSpace(val) + return true, nil + } + return false, nil +} + +func assignBoolFromEnvIfExists(assignee *bool, name string) (bool, error) { + if assignee == nil { + return false, fmt.Errorf("assignee is nil") + } + var err error + if val, present := os.LookupEnv(name); present && strings.TrimSpace(val) != "" { + *assignee, err = strconv.ParseBool(val) if err != nil { - return "", err + return false, fmt.Errorf("failed to parse %s %q: %v", name, val, err) } + return true, nil + } + return false, nil +} - metadata, err := metadataService.GetMetadata(0) +func assignIntFromEnvIfExists(assignee *int, name string) (bool, error) { + if assignee == nil { + return false, fmt.Errorf("assignee is nil") + } + var err error + if val, present := os.LookupEnv(name); present && strings.TrimSpace(val) != "" { + *assignee, err = parseInt32(val, 10) if err != nil { - return "", err + return false, fmt.Errorf("failed to parse %s %q: %v", name, val, err) } + return true, nil + } + return false, nil +} - return metadata.Compute.SubscriptionID, nil +func assignInt64FromEnvIfExists(assignee *int64, name string) (bool, error) { + if assignee == nil { + return false, fmt.Errorf("assignee is nil") + } + var err error + if val, present := os.LookupEnv(name); present && strings.TrimSpace(val) != "" { + *assignee, err = strconv.ParseInt(val, 10, 0) + if err != nil { + return false, fmt.Errorf("failed to parse %s %q: %v", name, val, err) + } + return true, nil + } + return false, nil +} + +func assignFloat32FromEnvIfExists(assignee *float32, name string) (bool, error) { + if assignee == nil { + return false, fmt.Errorf("assignee is nil") + } + var err error + if val, present := os.LookupEnv(name); present && strings.TrimSpace(val) != "" { + *assignee, err = parseFloat32(val) + if err != nil { + return false, fmt.Errorf("failed to parse %s %q: %v", name, val, err) + } + return true, nil + } + return false, nil +} + +func assignFloat64FromEnvIfExists(assignee *float64, name string) (bool, error) { + if assignee == nil { + return false, fmt.Errorf("assignee is nil") + } + var err error + if val, present := os.LookupEnv(name); present && strings.TrimSpace(val) != "" { + *assignee, err = strconv.ParseFloat(val, 64) + if err != nil { + return false, fmt.Errorf("failed to parse %s %q: %v", name, val, 
err) + } + return true, nil } - return subscriptionID, nil + return false, nil } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_config_test.go b/cluster-autoscaler/cloudprovider/azure/azure_config_test.go index 65c74a5c90a1..6bfd19ce2319 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_config_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_config_test.go @@ -17,174 +17,74 @@ limitations under the License. package azure import ( - "fmt" "testing" "github.com/stretchr/testify/assert" azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" + providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" ) -func TestInitializeCloudProviderRateLimitConfigWithNoConfigReturnsNoError(t *testing.T) { - err := initializeCloudProviderRateLimitConfig(nil) - assert.Nil(t, err, "err should be nil") +func TestCloudProviderAzureConsts(t *testing.T) { + // Just detect user-facing breaking changes from cloud-provider-azure. + // Shouldn't really change a lot, but just in case. + assert.Equal(t, "vmss", providerazureconsts.VMTypeVMSS) + assert.Equal(t, "standard", providerazureconsts.VMTypeStandard) } func TestInitializeCloudProviderRateLimitConfigWithNoRateLimitSettingsReturnsDefaults(t *testing.T) { - emptyConfig := &CloudProviderRateLimitConfig{} - err := initializeCloudProviderRateLimitConfig(emptyConfig) - - assert.NoError(t, err) - assert.Equal(t, emptyConfig.CloudProviderRateLimitQPS, rateLimitQPSDefault) - assert.Equal(t, emptyConfig.CloudProviderRateLimitBucket, rateLimitBucketDefault) - assert.Equal(t, emptyConfig.CloudProviderRateLimitQPSWrite, rateLimitQPSDefault) - assert.Equal(t, emptyConfig.CloudProviderRateLimitBucketWrite, rateLimitBucketDefault) -} - -func TestInitializeCloudProviderRateLimitConfigWithReadRateLimitSettingsFromEnv(t *testing.T) { - emptyConfig := &CloudProviderRateLimitConfig{} - var rateLimitReadQPS float32 = 3.0 - rateLimitReadBuckets := 10 - t.Setenv(rateLimitReadQPSEnvVar, fmt.Sprintf("%.1f", rateLimitReadQPS)) - t.Setenv(rateLimitReadBucketsEnvVar, fmt.Sprintf("%d", rateLimitReadBuckets)) - - err := initializeCloudProviderRateLimitConfig(emptyConfig) - assert.NoError(t, err) - assert.Equal(t, emptyConfig.CloudProviderRateLimitQPS, rateLimitReadQPS) - assert.Equal(t, emptyConfig.CloudProviderRateLimitBucket, rateLimitReadBuckets) - assert.Equal(t, emptyConfig.CloudProviderRateLimitQPSWrite, rateLimitReadQPS) - assert.Equal(t, emptyConfig.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets) -} - -func TestInitializeCloudProviderRateLimitConfigWithReadAndWriteRateLimitSettingsFromEnv(t *testing.T) { - emptyConfig := &CloudProviderRateLimitConfig{} - var rateLimitReadQPS float32 = 3.0 - rateLimitReadBuckets := 10 - var rateLimitWriteQPS float32 = 6.0 - rateLimitWriteBuckets := 20 - - t.Setenv(rateLimitReadQPSEnvVar, fmt.Sprintf("%.1f", rateLimitReadQPS)) - t.Setenv(rateLimitReadBucketsEnvVar, fmt.Sprintf("%d", rateLimitReadBuckets)) - t.Setenv(rateLimitWriteQPSEnvVar, fmt.Sprintf("%.1f", rateLimitWriteQPS)) - t.Setenv(rateLimitWriteBucketsEnvVar, fmt.Sprintf("%d", rateLimitWriteBuckets)) + emptyConfig := &providerazureconfig.CloudProviderRateLimitConfig{} + providerazureconfig.InitializeCloudProviderRateLimitConfig(emptyConfig) - err := initializeCloudProviderRateLimitConfig(emptyConfig) - - assert.NoError(t, err) - assert.Equal(t, emptyConfig.CloudProviderRateLimitQPS, rateLimitReadQPS) - assert.Equal(t, emptyConfig.CloudProviderRateLimitBucket, 
rateLimitReadBuckets) - assert.Equal(t, emptyConfig.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS) - assert.Equal(t, emptyConfig.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets) + assert.InDelta(t, emptyConfig.CloudProviderRateLimitQPS, providerazureconsts.RateLimitQPSDefault, 0.0001) + assert.InDelta(t, emptyConfig.CloudProviderRateLimitBucket, providerazureconsts.RateLimitBucketDefault, 0.0001) + assert.InDelta(t, emptyConfig.CloudProviderRateLimitQPSWrite, providerazureconsts.RateLimitQPSDefault, 0.0001) + assert.InDelta(t, emptyConfig.CloudProviderRateLimitBucketWrite, providerazureconsts.RateLimitBucketDefault, 0.0001) } -func TestInitializeCloudProviderRateLimitConfigWithReadAndWriteRateLimitAlreadySetInConfig(t *testing.T) { +func TestInitializeCloudProviderRateLimitConfigWithReadRateLimitSettings(t *testing.T) { var rateLimitReadQPS float32 = 3.0 rateLimitReadBuckets := 10 - var rateLimitWriteQPS float32 = 6.0 - rateLimitWriteBuckets := 20 - configWithRateLimits := &CloudProviderRateLimitConfig{ + cfg := &providerazureconfig.CloudProviderRateLimitConfig{ RateLimitConfig: azclients.RateLimitConfig{ - CloudProviderRateLimitBucket: rateLimitReadBuckets, - CloudProviderRateLimitBucketWrite: rateLimitWriteBuckets, - CloudProviderRateLimitQPS: rateLimitReadQPS, - CloudProviderRateLimitQPSWrite: rateLimitWriteQPS, - }, - } - - t.Setenv(rateLimitReadQPSEnvVar, "99") - t.Setenv(rateLimitReadBucketsEnvVar, "99") - t.Setenv(rateLimitWriteQPSEnvVar, "99") - t.Setenv(rateLimitWriteBucketsEnvVar, "99") - - err := initializeCloudProviderRateLimitConfig(configWithRateLimits) - - assert.NoError(t, err) - assert.Equal(t, configWithRateLimits.CloudProviderRateLimitQPS, rateLimitReadQPS) - assert.Equal(t, configWithRateLimits.CloudProviderRateLimitBucket, rateLimitReadBuckets) - assert.Equal(t, configWithRateLimits.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS) - assert.Equal(t, configWithRateLimits.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets) -} - -// nolint: goconst -func TestInitializeCloudProviderRateLimitConfigWithInvalidReadAndWriteRateLimitSettingsFromEnv(t *testing.T) { - emptyConfig := &CloudProviderRateLimitConfig{} - var rateLimitReadQPS float32 = 3.0 - rateLimitReadBuckets := 10 - var rateLimitWriteQPS float32 = 6.0 - rateLimitWriteBuckets := 20 - - invalidSetting := "invalid" - testCases := []struct { - desc string - isInvalidRateLimitReadQPSEnvVar bool - isInvalidRateLimitReadBucketsEnvVar bool - isInvalidRateLimitWriteQPSEnvVar bool - isInvalidRateLimitWriteBucketsEnvVar bool - expectedErr bool - expectedErrMsg error - }{ - { - desc: "an error shall be returned if invalid rateLimitReadQPSEnvVar", - isInvalidRateLimitReadQPSEnvVar: true, - expectedErr: true, - expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseFloat: parsing \"invalid\": invalid syntax", rateLimitReadQPSEnvVar, invalidSetting), - }, - { - desc: "an error shall be returned if invalid rateLimitReadBucketsEnvVar", - isInvalidRateLimitReadBucketsEnvVar: true, - expectedErr: true, - expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseInt: parsing \"invalid\": invalid syntax", rateLimitReadBucketsEnvVar, invalidSetting), - }, - { - desc: "an error shall be returned if invalid rateLimitWriteQPSEnvVar", - isInvalidRateLimitWriteQPSEnvVar: true, - expectedErr: true, - expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseFloat: parsing \"invalid\": invalid syntax", rateLimitWriteQPSEnvVar, invalidSetting), - }, - { - desc: "an error shall be returned if invalid 
rateLimitWriteBucketsEnvVar", - isInvalidRateLimitWriteBucketsEnvVar: true, - expectedErr: true, - expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseInt: parsing \"invalid\": invalid syntax", rateLimitWriteBucketsEnvVar, invalidSetting), + CloudProviderRateLimitQPS: rateLimitReadQPS, + CloudProviderRateLimitBucket: rateLimitReadBuckets, }, } - - for i, test := range testCases { - if test.isInvalidRateLimitReadQPSEnvVar { - t.Setenv(rateLimitReadQPSEnvVar, invalidSetting) - } else { - t.Setenv(rateLimitReadQPSEnvVar, fmt.Sprintf("%.1f", rateLimitReadQPS)) - } - if test.isInvalidRateLimitReadBucketsEnvVar { - t.Setenv(rateLimitReadBucketsEnvVar, invalidSetting) - } else { - t.Setenv(rateLimitReadBucketsEnvVar, fmt.Sprintf("%d", rateLimitReadBuckets)) - } - if test.isInvalidRateLimitWriteQPSEnvVar { - t.Setenv(rateLimitWriteQPSEnvVar, invalidSetting) - } else { - t.Setenv(rateLimitWriteQPSEnvVar, fmt.Sprintf("%.1f", rateLimitWriteQPS)) - } - if test.isInvalidRateLimitWriteBucketsEnvVar { - t.Setenv(rateLimitWriteBucketsEnvVar, invalidSetting) - } else { - t.Setenv(rateLimitWriteBucketsEnvVar, fmt.Sprintf("%d", rateLimitWriteBuckets)) - } - - err := initializeCloudProviderRateLimitConfig(emptyConfig) - - assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err) - assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err) - } + //t.Setenv("RATE_LIMIT_READ_QPS", fmt.Sprintf("%.1f", rateLimitReadQPS) + //t.Setenv("RATE_LIMIT_READ_BUCKETS", fmt.Sprintf("%d", rateLimitReadBuckets) + + providerazureconfig.InitializeCloudProviderRateLimitConfig(cfg) + assert.InDelta(t, cfg.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.CloudProviderRateLimitQPSWrite, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitQPSWrite, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitQPSWrite, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitQPSWrite, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, 
cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitQPSWrite, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets, 0.0001) } -func TestOverrideDefaultRateLimitConfig(t *testing.T) { +func TestInitializeCloudProviderRateLimitConfigWithReadAndWriteRateLimitSettings(t *testing.T) { var rateLimitReadQPS float32 = 3.0 rateLimitReadBuckets := 10 var rateLimitWriteQPS float32 = 6.0 rateLimitWriteBuckets := 20 - defaultConfigWithRateLimits := &CloudProviderRateLimitConfig{ + cfg := &providerazureconfig.CloudProviderRateLimitConfig{ RateLimitConfig: azclients.RateLimitConfig{ CloudProviderRateLimitBucket: rateLimitReadBuckets, CloudProviderRateLimitBucketWrite: rateLimitWriteBuckets, @@ -193,28 +93,31 @@ func TestOverrideDefaultRateLimitConfig(t *testing.T) { }, } - configWithRateLimits := &CloudProviderRateLimitConfig{ - RateLimitConfig: azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 0, - CloudProviderRateLimitBucketWrite: 0, - CloudProviderRateLimitQPS: 0, - CloudProviderRateLimitQPSWrite: 0, - }, - } - - newconfig := overrideDefaultRateLimitConfig(&defaultConfigWithRateLimits.RateLimitConfig, &configWithRateLimits.RateLimitConfig) - - assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitQPS, newconfig.CloudProviderRateLimitQPS) - assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitBucket, newconfig.CloudProviderRateLimitBucket) - assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitQPSWrite, newconfig.CloudProviderRateLimitQPSWrite) - assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitBucketWrite, newconfig.CloudProviderRateLimitBucketWrite) - - falseCloudProviderRateLimit := &CloudProviderRateLimitConfig{ - RateLimitConfig: azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - }, - } - newconfig = overrideDefaultRateLimitConfig(&defaultConfigWithRateLimits.RateLimitConfig, &falseCloudProviderRateLimit.RateLimitConfig) - assert.Equal(t, &falseCloudProviderRateLimit.RateLimitConfig, newconfig) + //t.Setenv("RATE_LIMIT_READ_QPS", fmt.Sprintf("%.1f", rateLimitReadQPS) + //t.Setenv("RATE_LIMIT_READ_BUCKETS", fmt.Sprintf("%d", rateLimitReadBuckets) + //t.Setenv("RATE_LIMIT_WRITE_QPS", fmt.Sprintf("%.1f", rateLimitWriteQPS) + //t.Setenv("RATE_LIMIT_WRITE_BUCKETS", fmt.Sprintf("%d", rateLimitWriteBuckets) + + providerazureconfig.InitializeCloudProviderRateLimitConfig(cfg) + + assert.InDelta(t, cfg.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS, 0.0001) + assert.InDelta(t, cfg.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS, 0.0001) + assert.InDelta(t, cfg.InterfaceRateLimit.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS, 
0.0001) + assert.InDelta(t, cfg.VirtualMachineRateLimit.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS, 0.0001) + assert.InDelta(t, cfg.StorageAccountRateLimit.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitQPS, rateLimitReadQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitBucket, rateLimitReadBuckets, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS, 0.0001) + assert.InDelta(t, cfg.VirtualMachineScaleSetRateLimit.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets, 0.0001) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go index 481fcaacf837..adc38e80b39a 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go @@ -19,12 +19,12 @@ package azure import ( "context" "fmt" - "net/http" "sync" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" "github.com/stretchr/testify/mock" + "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) const ( @@ -32,8 +32,8 @@ const ( fakeVirtualMachineVMID = "/subscriptions/test-subscription-id/resourceGroups/test-asg/providers/Microsoft.Compute/virtualMachines/%d" ) -// DeploymentsClientMock mocks for DeploymentsClient. -type DeploymentsClientMock struct { +// DeploymentClientMock mocks for DeploymentsClient. +type DeploymentClientMock struct { mock.Mock mutex sync.Mutex @@ -41,26 +41,26 @@ type DeploymentsClientMock struct { } // Get gets the DeploymentExtended by deploymentName. -func (m *DeploymentsClientMock) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExtended, err error) { +func (m *DeploymentClientMock) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExtended, err *retry.Error) { m.mutex.Lock() defer m.mutex.Unlock() deploy, ok := m.FakeStore[deploymentName] if !ok { - return result, fmt.Errorf("deployment not found") + return result, retry.NewError(false, fmt.Errorf("deployment not found")) } return deploy, nil } // ExportTemplate exports the deployment's template. -func (m *DeploymentsClientMock) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, err error) { +func (m *DeploymentClientMock) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, err *retry.Error) { m.mutex.Lock() defer m.mutex.Unlock() deploy, ok := m.FakeStore[deploymentName] if !ok { - return result, fmt.Errorf("deployment not found") + return result, retry.NewError(false, fmt.Errorf("deployment not found")) } return resources.DeploymentExportResult{ @@ -69,7 +69,7 @@ func (m *DeploymentsClientMock) ExportTemplate(ctx context.Context, resourceGrou } // CreateOrUpdate creates or updates the Deployment. 
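As an aside, the renamed DeploymentClientMock now follows the *retry.Error convention of the real deployment client instead of returning (*http.Response, error); a minimal sketch of how a caller unwraps that error type, with lookupDeployment invented purely for illustration.

package main

import (
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)

// lookupDeployment is a stand-in for a client call such as DeploymentClientMock.Get.
func lookupDeployment(name string) *retry.Error {
	if name == "" {
		// retry.NewError(false, err) marks the failure as non-retriable, as the mock does.
		return retry.NewError(false, fmt.Errorf("deployment not found"))
	}
	return nil
}

func main() {
	if rerr := lookupDeployment(""); rerr != nil {
		// rerr.Error() converts the rich *retry.Error back into a plain error, the same
		// conversion the agent-pool code performs before returning or wrapping it.
		err := rerr.Error()
		fmt.Println("failed:", err)
	}
}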
-func (m *DeploymentsClientMock) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (resp *http.Response, err error) { +func (m *DeploymentClientMock) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment, etag string) (err *retry.Error) { m.mutex.Lock() defer m.mutex.Unlock() @@ -83,11 +83,11 @@ func (m *DeploymentsClientMock) CreateOrUpdate(ctx context.Context, resourceGrou deploy.Properties.Parameters = parameters.Properties.Parameters deploy.Properties.Template = parameters.Properties.Template - return &http.Response{StatusCode: 200}, nil + return nil } // List gets all the deployments for a resource group. -func (m *DeploymentsClientMock) List(ctx context.Context, resourceGroupName, filter string, top *int32) (result []resources.DeploymentExtended, err error) { +func (m *DeploymentClientMock) List(ctx context.Context, resourceGroupName string) (result []resources.DeploymentExtended, err *retry.Error) { m.mutex.Lock() defer m.mutex.Unlock() @@ -100,12 +100,12 @@ func (m *DeploymentsClientMock) List(ctx context.Context, resourceGroupName, fil } // Delete deletes the given deployment -func (m *DeploymentsClientMock) Delete(ctx context.Context, resourceGroupName, deploymentName string) (resp *http.Response, err error) { +func (m *DeploymentClientMock) Delete(ctx context.Context, resourceGroupName, deploymentName string) (err *retry.Error) { m.mutex.Lock() defer m.mutex.Unlock() if _, ok := m.FakeStore[deploymentName]; !ok { - return nil, fmt.Errorf("there is no such a deployment with name %s", deploymentName) + return retry.NewError(false, fmt.Errorf("there is no such a deployment with name %s", deploymentName)) } delete(m.FakeStore, deploymentName) diff --git a/cluster-autoscaler/cloudprovider/azure/azure_manager.go b/cluster-autoscaler/cloudprovider/azure/azure_manager.go index 717ec86a402d..4a60051ffe18 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_manager.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_manager.go @@ -32,15 +32,13 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" kretry "k8s.io/client-go/util/retry" klog "k8s.io/klog/v2" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) const ( azurePrefix = "azure://" - vmTypeVMSS = "vmss" - vmTypeStandard = "standard" - scaleToZeroSupportedStandard = false scaleToZeroSupportedVMSS = true refreshInterval = 1 * time.Minute @@ -103,8 +101,8 @@ func createAzureManagerInternal(configReader io.Reader, discoveryOpts cloudprovi } cacheTTL := refreshInterval - if cfg.VmssCacheTTL != 0 { - cacheTTL = time.Duration(cfg.VmssCacheTTL) * time.Second + if cfg.VmssCacheTTLInSeconds != 0 { + cacheTTL = time.Duration(cfg.VmssCacheTTLInSeconds) * time.Second } cache, err := newAzureCache(azClient, cacheTTL, *cfg) if err != nil { @@ -166,7 +164,7 @@ func (m *AzureManager) fetchExplicitNodeGroups(specs []string) error { func (m *AzureManager) buildNodeGroupFromSpec(spec string) (cloudprovider.NodeGroup, error) { scaleToZeroSupported := scaleToZeroSupportedStandard - if strings.EqualFold(m.config.VMType, vmTypeVMSS) { + if strings.EqualFold(m.config.VMType, providerazureconsts.VMTypeVMSS) { scaleToZeroSupported = scaleToZeroSupportedVMSS } s, err := dynamic.SpecFromString(spec, scaleToZeroSupported) @@ -179,9 +177,9 @@ func (m *AzureManager) buildNodeGroupFromSpec(spec string) (cloudprovider.NodeGr } switch m.config.VMType { - 
case vmTypeStandard: + case providerazureconsts.VMTypeStandard: return NewAgentPool(s, m) - case vmTypeVMSS: + case providerazureconsts.VMTypeVMSS: return NewScaleSet(s, m, -1, false) default: return nil, fmt.Errorf("vmtype %s not supported", m.config.VMType) @@ -310,7 +308,7 @@ func (m *AzureManager) getFilteredNodeGroups(filter []labelAutoDiscoveryConfig) return nil, nil } - if m.config.VMType == vmTypeVMSS { + if m.config.VMType == providerazureconsts.VMTypeVMSS { return m.getFilteredScaleSets(filter) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go b/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go index 8c7fd01df4b8..0fefb34400ce 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_manager_test.go @@ -24,6 +24,7 @@ import ( "time" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient" + "sigs.k8s.io/cloud-provider-azure/pkg/retry" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" @@ -33,9 +34,13 @@ import ( "go.uber.org/mock/gomock" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + azclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" + providerazureconsts "sigs.k8s.io/cloud-provider-azure/pkg/consts" + providerazure "sigs.k8s.io/cloud-provider-azure/pkg/provider" + providerazureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" ) const validAzureCfg = `{ @@ -51,6 +56,29 @@ const validAzureCfg = `{ "vnetName": "fakeName", "routeTableName": "fakeName", "primaryAvailabilitySetName": "fakeName", + "vmssCacheTTLInSeconds": 60, + "vmssVirtualMachinesCacheTTLInSeconds": 240, + "vmssVmsCacheJitter": 120, + "maxDeploymentsCount": 8, + "cloudProviderRateLimit": false, + "routeRateLimit": { + "cloudProviderRateLimit": true, + "cloudProviderRateLimitQPS": 3 + } +}` + +const validAzureCfgLegacy = `{ + "cloud": "AzurePublicCloud", + "tenantId": "fakeId", + "subscriptionId": "fakeId", + "resourceGroup": "fakeId", + "location": "southeastasia", + "useWorkloadIdentityExtension": true, + "subnetName": "fakeName", + "securityGroupName": "fakeName", + "vnetName": "fakeName", + "routeTableName": "fakeName", + "primaryAvailabilitySetName": "fakeName", "vmssCacheTTL": 60, "vmssVmsCacheTTL": 240, "vmssVmsCacheJitter": 120, @@ -76,8 +104,8 @@ const validAzureCfgForStandardVMType = `{ "vnetName": "fakeName", "routeTableName": "fakeName", "primaryAvailabilitySetName": "fakeName", - "vmssCacheTTL": 60, - "vmssVmsCacheTTL": 240, + "vmssCacheTTLInSeconds": 60, + "vmssVirtualMachinesCacheTTLInSeconds": 240, "vmssVmsCacheJitter": 120, "maxDeploymentsCount": 8, "cloudProviderRateLimit": false, @@ -111,29 +139,57 @@ const validAzureCfgForStandardVMType = `{ }` const validAzureCfgForStandardVMTypeWithoutDeploymentParameters = `{ - "cloud": "AzurePublicCloud", - "tenantId": "fakeId", - "subscriptionId": "fakeId", - "aadClientId": "fakeId", - "aadClientSecret": "fakeId", - "resourceGroup": "fakeId", - "vmType":"standard", - "location": "southeastasia", - "subnetName": "fakeName", - "securityGroupName": "fakeName", - "vnetName": "fakeName", - "routeTableName": "fakeName", - 
"primaryAvailabilitySetName": "fakeName", - "vmssCacheTTL": 60, - "vmssVmsCacheTTL": 240, + "cloud": "AzurePublicCloud", + "tenantId": "fakeId", + "subscriptionId": "fakeId", + "aadClientId": "fakeId", + "aadClientSecret": "fakeId", + "resourceGroup": "fakeId", + "vmType":"standard", + "location": "southeastasia", + "subnetName": "fakeName", + "securityGroupName": "fakeName", + "vnetName": "fakeName", + "routeTableName": "fakeName", + "primaryAvailabilitySetName": "fakeName", + "vmssCacheTTLInSeconds": 60, + "vmssVirtualMachinesCacheTTLInSeconds": 240, "vmssVmsCacheJitter": 120, - "maxDeploymentsCount": 8, - "cloudProviderRateLimit": false, - "routeRateLimit": { - "cloudProviderRateLimit": true, - "cloudProviderRateLimitQPS": 3 - }, - "deployment":"cluster-autoscaler-0001" + "maxDeploymentsCount": 8, + "cloudProviderRateLimit": false, + "routeRateLimit": { + "cloudProviderRateLimit": true, + "cloudProviderRateLimitQPS": 3 + }, + "deployment":"cluster-autoscaler-0001" +}` + +const validAzureCfgForVMsPool = `{ + "cloud": "AzurePublicCloud", + "tenantId": "fakeId", + "subscriptionId": "fakeId", + "aadClientId": "fakeId", + "aadClientSecret": "fakeId", + "resourceGroup": "fakeId", + "location": "southeastasia", + "subnetName": "fakeName", + "securityGroupName": "fakeName", + "vnetName": "fakeName", + "routeTableName": "fakeName", + "primaryAvailabilitySetName": "fakeName", + "vmssCacheTTLInSeconds": 60, + "vmssVirtualMachinesCacheTTLInSeconds": 240, + "vmssVmsCacheJitter": 120, + "maxDeploymentsCount": 8, + "cloudProviderRateLimit": false, + "routeRateLimit": { + "cloudProviderRateLimit": true, + "cloudProviderRateLimitQPS": 3 + }, + + "clusterName": "mycluster", + "clusterResourceGroup": "myrg", + "armBaseURLForAPClient": "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local" }` const ( @@ -155,73 +211,157 @@ func TestCreateAzureManagerValidConfig(t *testing.T) { manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfg), cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) expectedConfig := &Config{ - Cloud: "AzurePublicCloud", - Location: "southeastasia", - TenantID: "fakeId", - SubscriptionID: "fakeId", - ResourceGroup: "fakeId", - VMType: "vmss", - AADClientID: "fakeId", - AADClientSecret: "fakeId", - VmssCacheTTL: 60, - VmssVmsCacheTTL: 240, - VmssVmsCacheJitter: 120, - MaxDeploymentsCount: 8, - CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{ - RateLimitConfig: azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - InterfaceRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - VirtualMachineRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - StorageAccountRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + Cloud: "AzurePublicCloud", + TenantID: "fakeId", + }, + 
AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "fakeId", + AADClientSecret: "fakeId", + }, + SubscriptionID: "fakeId", }, - DiskRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Location: "southeastasia", + ResourceGroup: "fakeId", + VMType: "vmss", + VmssCacheTTLInSeconds: 60, + VmssVirtualMachinesCacheTTLInSeconds: 240, + CloudProviderRateLimitConfig: providerazureconfig.CloudProviderRateLimitConfig{ + RateLimitConfig: azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + InterfaceRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + StorageAccountRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + DiskRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, }, - VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + }, + VmssVmsCacheJitter: 120, + MaxDeploymentsCount: 8, + } + + assert.NoError(t, err) + assertStructsMinimallyEqual(t, *expectedConfig, *manager.config) +} + +func TestCreateAzureManagerLegacyConfig(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) + mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2) + mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2) + mockAzClient := &azClient{ + virtualMachinesClient: mockVMClient, + virtualMachineScaleSetsClient: mockVMSSClient, + } + manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfgLegacy), cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) + + expectedConfig := &Config{ + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + Cloud: "AzurePublicCloud", + TenantID: "fakeId", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + UseFederatedWorkloadIdentityExtension: true, + }, + SubscriptionID: "fakeId", }, - KubernetesServiceRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - 
CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Location: "southeastasia", + ResourceGroup: "fakeId", + VMType: "vmss", + VmssCacheTTLInSeconds: 60, + VmssVirtualMachinesCacheTTLInSeconds: 240, + CloudProviderRateLimitConfig: providerazureconfig.CloudProviderRateLimitConfig{ + RateLimitConfig: azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + InterfaceRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + StorageAccountRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + DiskRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, }, }, + VmssVmsCacheJitter: 120, + MaxDeploymentsCount: 8, } assert.NoError(t, err) - assert.Equal(t, true, reflect.DeepEqual(*expectedConfig, *manager.config), "unexpected azure manager configuration") + assertStructsMinimallyEqual(t, *expectedConfig, *manager.config) } func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) { @@ -238,70 +378,71 @@ func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) { manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfgForStandardVMType), cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) expectedConfig := &Config{ - Cloud: "AzurePublicCloud", - Location: "southeastasia", - TenantID: "fakeId", - SubscriptionID: "fakeId", - ResourceGroup: "fakeId", - VMType: "standard", - AADClientID: "fakeId", - AADClientSecret: "fakeId", - VmssCacheTTL: 60, - VmssVmsCacheTTL: 240, - VmssVmsCacheJitter: 120, - MaxDeploymentsCount: 8, - CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{ - RateLimitConfig: azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - InterfaceRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - VirtualMachineRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - StorageAccountRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - 
CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - DiskRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + Cloud: "AzurePublicCloud", + TenantID: "fakeId", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "fakeId", + AADClientSecret: "fakeId", + }, + SubscriptionID: "fakeId", }, - KubernetesServiceRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: false, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Location: "southeastasia", + ResourceGroup: "fakeId", + VMType: "standard", + VmssCacheTTLInSeconds: 60, + VmssVirtualMachinesCacheTTLInSeconds: 240, + CloudProviderRateLimitConfig: providerazureconfig.CloudProviderRateLimitConfig{ + RateLimitConfig: azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + InterfaceRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + StorageAccountRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + DiskRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, }, }, - Deployment: "cluster-autoscaler-0001", + VmssVmsCacheJitter: 120, + MaxDeploymentsCount: 8, + Deployment: "cluster-autoscaler-0001", DeploymentParameters: map[string]interface{}{ "Name": "cluster-autoscaler-0001", "Properties": map[string]interface{}{ @@ -327,7 +468,7 @@ func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) { } assert.NoError(t, err) - assert.Equal(t, *expectedConfig, *manager.config, "unexpected azure manager configuration, expected: %v, actual: %v", *expectedConfig, *manager.config) + assertStructsMinimallyEqual(t, *expectedConfig, *manager.config) } func TestCreateAzureManagerValidConfigForStandardVMTypeWithoutDeploymentParameters(t *testing.T) { @@ -336,6 +477,92 @@ func 
TestCreateAzureManagerValidConfigForStandardVMTypeWithoutDeploymentParamete assert.Nil(t, manager) assert.Equal(t, expectedErr, err.Error(), "return error does not match, expected: %v, actual: %v", expectedErr, err.Error()) } +func TestCreateAzureManagerValidConfigForVMsPool(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) + mockVMSSClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachineScaleSet{}, nil).Times(2) + mockVMClient.EXPECT().List(gomock.Any(), "fakeId").Return([]compute.VirtualMachine{}, nil).Times(2) + mockAzClient := &azClient{ + virtualMachinesClient: mockVMClient, + virtualMachineScaleSetsClient: mockVMSSClient, + } + manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfgForVMsPool), cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) + + expectedConfig := &Config{ + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + Cloud: "AzurePublicCloud", + TenantID: "fakeId", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "fakeId", + AADClientSecret: "fakeId", + }, + SubscriptionID: "fakeId", + }, + Location: "southeastasia", + ResourceGroup: "fakeId", + VMType: "vmss", + VmssCacheTTLInSeconds: 60, + VmssVirtualMachinesCacheTTLInSeconds: 240, + CloudProviderRateLimitConfig: providerazureconfig.CloudProviderRateLimitConfig{ + RateLimitConfig: azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + InterfaceRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + StorageAccountRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + DiskRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: false, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + }, + }, + VmssVmsCacheJitter: 120, + MaxDeploymentsCount: 8, + ClusterName: "mycluster", + ClusterResourceGroup: "myrg", + ARMBaseURLForAPClient: "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local", + } + + assert.NoError(t, err) + assertStructsMinimallyEqual(t, *expectedConfig, *manager.config) +} func TestCreateAzureManagerWithNilConfig(t *testing.T) { ctrl := gomock.NewController(t) @@ -350,83 +577,83 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { } expectedConfig := &Config{ - Cloud: "AzurePublicCloud", - Location: "southeastasia", - TenantID: "tenantId", - SubscriptionID: "subscriptionId", 
- ResourceGroup: "resourceGroup", - ClusterName: "mycluster", - ClusterResourceGroup: "myrg", - ARMBaseURLForAPClient: "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local", - VMType: "vmss", - AADClientID: "aadClientId", - AADClientSecret: "aadClientSecret", - AADClientCertPath: "aadClientCertPath", - AADClientCertPassword: "aadClientCertPassword", - Deployment: "deployment", - UseManagedIdentityExtension: true, - UserAssignedIdentityID: "UserAssignedIdentityID", - VmssCacheTTL: 100, - VmssVmsCacheTTL: 110, - VmssVmsCacheJitter: 90, - GetVmssSizeRefreshPeriod: 30, - MaxDeploymentsCount: 8, - CloudProviderBackoff: true, - CloudProviderBackoffRetries: 1, - CloudProviderBackoffExponent: 1, - CloudProviderBackoffDuration: 1, - CloudProviderBackoffJitter: 1, - CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{ - RateLimitConfig: azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - InterfaceRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - VirtualMachineRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - StorageAccountRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - DiskRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, - }, - VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + Cloud: "AzurePublicCloud", + TenantID: "tenantId", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "aadClientId", + AADClientSecret: "aadClientSecret", + AADClientCertPath: "aadClientCertPath", + AADClientCertPassword: "aadClientCertPassword", + UseManagedIdentityExtension: true, + UserAssignedIdentityID: "UserAssignedIdentityID", + }, + SubscriptionID: "subscriptionId", }, - KubernetesServiceRateLimit: &azclients.RateLimitConfig{ - CloudProviderRateLimit: true, - CloudProviderRateLimitBucket: 5, - CloudProviderRateLimitBucketWrite: 5, - CloudProviderRateLimitQPS: 1, - CloudProviderRateLimitQPSWrite: 1, + Location: "southeastasia", + ResourceGroup: "resourceGroup", + VMType: "vmss", + VmssCacheTTLInSeconds: 100, + VmssVirtualMachinesCacheTTLInSeconds: 110, + CloudProviderBackoff: true, + CloudProviderBackoffRetries: 1, + CloudProviderBackoffExponent: 1, + CloudProviderBackoffDuration: 1, + CloudProviderBackoffJitter: 1, + CloudProviderRateLimitConfig: providerazureconfig.CloudProviderRateLimitConfig{ + RateLimitConfig: azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + 
CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + InterfaceRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + StorageAccountRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + DiskRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, }, }, + ClusterName: "mycluster", + ClusterResourceGroup: "myrg", + ARMBaseURLForAPClient: "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local", + Deployment: "deployment", + VmssVmsCacheJitter: 90, + MaxDeploymentsCount: 8, } t.Setenv("ARM_CLOUD", "AzurePublicCloud") @@ -463,15 +690,13 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Run("environment variables correctly set", func(t *testing.T) { manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) assert.NoError(t, err) - manager.config.TenantID = "tenantId" - manager.config.AADClientID = "aadClientId" - assert.Equal(t, true, reflect.DeepEqual(*expectedConfig, *manager.config), "unexpected azure manager configuration") + assertStructsMinimallyEqual(t, *expectedConfig, *manager.config) }) t.Run("invalid bool for ARM_USE_MANAGED_IDENTITY_EXTENSION", func(t *testing.T) { t.Setenv("ARM_USE_MANAGED_IDENTITY_EXTENSION", "invalidbool") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) - expectedErr0 := "strconv.ParseBool: parsing \"invalidbool\": invalid syntax" + expectedErr0 := "failed to parse ARM_USE_MANAGED_IDENTITY_EXTENSION \"invalidbool\": strconv.ParseBool: parsing \"invalidbool\": invalid syntax" assert.Nil(t, manager) assert.Equal(t, expectedErr0, err.Error(), "Return err does not match, expected: %v, actual: %v", expectedErr0, err.Error()) }) @@ -487,7 +712,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Run("invalid int for AZURE_GET_VMSS_SIZE_REFRESH_PERIOD", func(t *testing.T) { t.Setenv("AZURE_GET_VMSS_SIZE_REFRESH_PERIOD", "invalidint") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) - expectedErr := fmt.Errorf("failed to parse AZURE_GET_VMSS_SIZE_REFRESH_PERIOD \"invalidint\": strconv.Atoi: parsing \"invalidint\": invalid syntax") + expectedErr := fmt.Errorf("failed to parse AZURE_GET_VMSS_SIZE_REFRESH_PERIOD \"invalidint\": strconv.ParseInt: parsing \"invalidint\": invalid syntax") assert.Nil(t, manager) assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err) }) @@ -518,7 +743,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Run("invalid int for 
BACKOFF_RETRIES", func(t *testing.T) { t.Setenv("BACKOFF_RETRIES", "invalidint") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) - expectedErr := fmt.Errorf("failed to parse BACKOFF_RETRIES '\\x00': strconv.ParseInt: parsing \"invalidint\": invalid syntax") + expectedErr := fmt.Errorf("failed to parse BACKOFF_RETRIES \"invalidint\": strconv.ParseInt: parsing \"invalidint\": invalid syntax") assert.Nil(t, manager) assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err) }) @@ -527,7 +752,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Setenv("BACKOFF_RETRIES", "") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) assert.NoError(t, err) - assert.Equal(t, backoffRetriesDefault, (*manager.config).CloudProviderBackoffRetries, "CloudProviderBackoffRetries does not match.") + assert.Equal(t, providerazureconsts.BackoffRetriesDefault, (*manager.config).CloudProviderBackoffRetries, "CloudProviderBackoffRetries does not match.") }) t.Run("invalid float for BACKOFF_EXPONENT", func(t *testing.T) { @@ -542,7 +767,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Setenv("BACKOFF_EXPONENT", "") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) assert.NoError(t, err) - assert.Equal(t, backoffExponentDefault, (*manager.config).CloudProviderBackoffExponent, "CloudProviderBackoffExponent does not match.") + assert.Equal(t, providerazureconsts.BackoffExponentDefault, (*manager.config).CloudProviderBackoffExponent, "CloudProviderBackoffExponent does not match.") }) t.Run("invalid int for BACKOFF_DURATION", func(t *testing.T) { @@ -557,7 +782,7 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Setenv("BACKOFF_DURATION", "") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) assert.NoError(t, err) - assert.Equal(t, backoffDurationDefault, (*manager.config).CloudProviderBackoffDuration, "CloudProviderBackoffDuration does not match.") + assert.Equal(t, providerazureconsts.BackoffDurationDefault, (*manager.config).CloudProviderBackoffDuration, "CloudProviderBackoffDuration does not match.") }) t.Run("invalid float for BACKOFF_JITTER", func(t *testing.T) { @@ -572,18 +797,148 @@ func TestCreateAzureManagerWithNilConfig(t *testing.T) { t.Setenv("BACKOFF_JITTER", "") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) assert.NoError(t, err) - assert.Equal(t, backoffJitterDefault, (*manager.config).CloudProviderBackoffJitter, "CloudProviderBackoffJitter does not match.") + assert.Equal(t, providerazureconsts.BackoffJitterDefault, (*manager.config).CloudProviderBackoffJitter, "CloudProviderBackoffJitter does not match.") }) t.Run("invalid bool for CLOUD_PROVIDER_RATE_LIMIT", func(t *testing.T) { t.Setenv("CLOUD_PROVIDER_RATE_LIMIT", "invalidbool") manager, err := createAzureManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) - expectedErr := fmt.Errorf("failed to parse CLOUD_PROVIDER_RATE_LIMIT: \"invalidbool\", strconv.ParseBool: parsing \"invalidbool\": invalid syntax") + expectedErr := fmt.Errorf("failed to parse CLOUD_PROVIDER_RATE_LIMIT \"invalidbool\": strconv.ParseBool: parsing \"invalidbool\": invalid syntax") assert.Nil(t, manager) assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", 
expectedErr, err) }) } +func TestCreateAzureManagerWithEnvOverridingConfig(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockVMClient := mockvmclient.NewMockInterface(ctrl) + mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) + mockVMSSClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachineScaleSet{}, nil).AnyTimes() + mockVMClient.EXPECT().List(gomock.Any(), "resourceGroup").Return([]compute.VirtualMachine{}, nil).AnyTimes() + mockAzClient := &azClient{ + virtualMachinesClient: mockVMClient, + virtualMachineScaleSetsClient: mockVMSSClient, + } + + expectedConfig := &Config{ + Config: providerazure.Config{ + AzureAuthConfig: providerazureconfig.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + Cloud: "AzurePublicCloud", + TenantID: "tenantId", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "aadClientId", + AADClientSecret: "aadClientSecret", + AADClientCertPath: "aadClientCertPath", + AADClientCertPassword: "aadClientCertPassword", + UseManagedIdentityExtension: true, + UserAssignedIdentityID: "UserAssignedIdentityID", + }, + SubscriptionID: "subscriptionId", + }, + Location: "southeastasia", + ResourceGroup: "resourceGroup", + VMType: "vmss", + VmssCacheTTLInSeconds: 100, + VmssVirtualMachinesCacheTTLInSeconds: 110, + CloudProviderBackoff: true, + CloudProviderBackoffRetries: 1, + CloudProviderBackoffExponent: 1, + CloudProviderBackoffDuration: 1, + CloudProviderBackoffJitter: 1, + CloudProviderRateLimitConfig: providerazureconfig.CloudProviderRateLimitConfig{ + RateLimitConfig: azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + InterfaceRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + StorageAccountRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + DiskRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{ + CloudProviderRateLimit: true, + CloudProviderRateLimitBucket: 5, + CloudProviderRateLimitBucketWrite: 5, + CloudProviderRateLimitQPS: 1, + CloudProviderRateLimitQPSWrite: 1, + }, + }, + }, + ClusterName: "mycluster", + ClusterResourceGroup: "myrg", + ARMBaseURLForAPClient: "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local", + Deployment: "deployment", + VmssVmsCacheJitter: 90, + MaxDeploymentsCount: 8, + } + + t.Setenv("ARM_CLOUD", "AzurePublicCloud") + // LOCATION is not set from env to test getting it from config file + t.Setenv("AZURE_TENANT_ID", "tenantId") + t.Setenv("AZURE_CLIENT_ID", "aadClientId") + t.Setenv("ARM_SUBSCRIPTION_ID", "subscriptionId") + t.Setenv("ARM_RESOURCE_GROUP", "resourceGroup") + t.Setenv("AZURE_TENANT_ID", 
"tenantId") + t.Setenv("ARM_TENANT_ID", "tenantId") + t.Setenv("AZURE_CLIENT_ID", "aadClientId") + t.Setenv("ARM_CLIENT_ID", "aadClientId") + t.Setenv("ARM_CLIENT_SECRET", "aadClientSecret") + t.Setenv("ARM_VM_TYPE", "vmss") // this is one of the differences with the config file, expect this to take precedence + t.Setenv("ARM_CLIENT_CERT_PATH", "aadClientCertPath") + t.Setenv("ARM_CLIENT_CERT_PASSWORD", "aadClientCertPassword") + t.Setenv("ARM_DEPLOYMENT", "deployment") + t.Setenv("ARM_USE_MANAGED_IDENTITY_EXTENSION", "true") + t.Setenv("ARM_USER_ASSIGNED_IDENTITY_ID", "UserAssignedIdentityID") + t.Setenv("AZURE_VMSS_CACHE_TTL", "100") + t.Setenv("AZURE_VMSS_VMS_CACHE_TTL", "110") + t.Setenv("AZURE_VMSS_VMS_CACHE_JITTER", "90") + t.Setenv("AZURE_MAX_DEPLOYMENT_COUNT", "8") + t.Setenv("ENABLE_BACKOFF", "true") + t.Setenv("BACKOFF_RETRIES", "1") + t.Setenv("BACKOFF_EXPONENT", "1") + t.Setenv("BACKOFF_DURATION", "1") + t.Setenv("BACKOFF_JITTER", "1") + t.Setenv("CLOUD_PROVIDER_RATE_LIMIT", "true") + t.Setenv("CLUSTER_NAME", "mycluster") + t.Setenv("ARM_CLUSTER_RESOURCE_GROUP", "myrg") + t.Setenv("ARM_BASE_URL_FOR_AP_CLIENT", "nodeprovisioner-svc.nodeprovisioner.svc.cluster.local") + + t.Run("environment variables correctly set", func(t *testing.T) { + manager, err := createAzureManagerInternal(strings.NewReader(validAzureCfgForStandardVMType), cloudprovider.NodeGroupDiscoveryOptions{}, mockAzClient) + assert.NoError(t, err) + assertStructsMinimallyEqual(t, *expectedConfig, *manager.config) + }) +} + func TestCreateAzureManagerInvalidConfig(t *testing.T) { _, err := createAzureManagerInternal(strings.NewReader(invalidAzureCfg), cloudprovider.NodeGroupDiscoveryOptions{}, &azClient{}) assert.Error(t, err, "failed to unmarshal config body") @@ -620,7 +975,7 @@ func TestFetchExplicitNodeGroups(t *testing.T) { } else { mockVMClient := mockvmclient.NewMockInterface(ctrl) - manager.config.EnableVmssFlex = true + manager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient } @@ -638,7 +993,7 @@ func TestFetchExplicitNodeGroups(t *testing.T) { testAS := newTestAgentPool(newTestAzureManager(t), "testAS") timeLayout := "2006-01-02 15:04:05" timeBenchMark, _ := time.Parse(timeLayout, "2000-01-01 00:00:00") - testAS.manager.azClient.deploymentsClient = &DeploymentsClientMock{ + testAS.manager.azClient.deploymentClient = &DeploymentClientMock{ FakeStore: map[string]resources.DeploymentExtended{ "cluster-autoscaler-0001": { Name: to.StringPtr("cluster-autoscaler-0001"), @@ -649,9 +1004,9 @@ func TestFetchExplicitNodeGroups(t *testing.T) { }, }, } - testAS.manager.config.VMType = vmTypeStandard + testAS.manager.config.VMType = providerazureconsts.VMTypeStandard err := testAS.manager.fetchExplicitNodeGroups([]string{"1:5:testAS"}) - expectedErr := fmt.Errorf("failed to parse node group spec: deployment not found") + expectedErr := fmt.Errorf("failed to parse node group spec: %v", retry.NewError(false, fmt.Errorf("deployment not found")).Error()) assert.Equal(t, expectedErr, err, "testAS.manager.fetchExplicitNodeGroups return error does not match, expected: %v, actual: %v", expectedErr, err) err = testAS.manager.fetchExplicitNodeGroups(nil) assert.NoError(t, err) @@ -840,3 +1195,36 @@ func TestGetScaleSetOptions(t *testing.T) { opts = manager.GetScaleSetOptions("test3", defaultOptions) assert.Equal(t, *opts, defaultOptions) } + +func 
assertStructsMinimallyEqual(t *testing.T, struct1, struct2 interface{}) bool { + return compareStructFields(t, reflect.ValueOf(struct1), reflect.ValueOf(struct2)) +} + +func compareStructFields(t *testing.T, v1, v2 reflect.Value) bool { + if v1.Type() != v2.Type() { + return assert.Fail(t, "different types", "v1 type: %v, v2 type: %v", v1.Type(), v2.Type()) + } + + for i := 0; i < v1.NumField(); i++ { + field1 := v1.Field(i) + field2 := v2.Field(i) + fieldType := v1.Type().Field(i) + + if field1.IsZero() || reflect.DeepEqual(field1.Interface(), reflect.Zero(field1.Type()).Interface()) { + continue // Skip zero value fields in struct1 + } + + if field1.Kind() == reflect.Struct { + // Recursively compare nested structs + if !compareStructFields(t, field1, field2) { + return false + } + } else { + if !assert.Equal(t, field1.Interface(), field2.Interface(), "field %s", fieldType.Name) { + return false + } + } + } + + return true +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go index 71a8dfd0043c..fdb3220224c8 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go @@ -112,8 +112,8 @@ func NewScaleSet(spec *dynamic.NodeGroupSpec, az *AzureManager, curSize int64, d dedicatedHost: dedicatedHost, } - if az.config.VmssVmsCacheTTL != 0 { - scaleSet.instancesRefreshPeriod = time.Duration(az.config.VmssVmsCacheTTL) * time.Second + if az.config.VmssVirtualMachinesCacheTTLInSeconds != 0 { + scaleSet.instancesRefreshPeriod = time.Duration(az.config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second } else { scaleSet.instancesRefreshPeriod = defaultVmssInstancesRefreshPeriod } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go index 5b6843caf412..bc6bda019f3d 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_instance_cache.go @@ -106,10 +106,10 @@ func (scaleSet *ScaleSet) updateInstanceCache() error { } if orchestrationMode == compute.Flexible { - if scaleSet.manager.config.EnableVmssFlex { + if scaleSet.manager.config.EnableVmssFlexNodes { return scaleSet.buildScaleSetCacheForFlex() } - return fmt.Errorf("vmss - %q with Flexible orchestration detected but 'enableVmssFlex' feature flag is turned off", scaleSet.Name) + return fmt.Errorf("vmss - %q with Flexible orchestration detected but 'enableVmssFlexNodes' feature flag is turned off", scaleSet.Name) } else if orchestrationMode == compute.Uniform { return scaleSet.buildScaleSetCacheForUniform() } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go index f3aa0dc356d8..b3bd5fca5b5c 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go @@ -203,7 +203,7 @@ func TestTargetSize(t *testing.T) { mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - provider.azureManager.config.EnableVmssFlex = true + provider.azureManager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), 
"test-asg").Return(expectedVMs, nil).AnyTimes() } @@ -278,7 +278,7 @@ func TestIncreaseSize(t *testing.T) { mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - provider.azureManager.config.EnableVmssFlex = true + provider.azureManager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() } err := provider.azureManager.forceRefresh() @@ -514,7 +514,7 @@ func TestBelongs(t *testing.T) { mockVMSSVMClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - provider.azureManager.config.EnableVmssFlex = true + provider.azureManager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() } @@ -603,7 +603,7 @@ func TestDeleteNodes(t *testing.T) { mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - manager.config.EnableVmssFlex = true + manager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() manager.azClient.virtualMachinesClient = mockVMClient } @@ -739,7 +739,7 @@ func TestDeleteNodeUnregistered(t *testing.T) { mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes() manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - manager.config.EnableVmssFlex = true + manager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() } err := manager.forceRefresh() @@ -1013,7 +1013,7 @@ func TestScaleSetNodes(t *testing.T) { provider.azureManager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient } else { - provider.azureManager.config.EnableVmssFlex = true + provider.azureManager.config.EnableVmssFlexNodes = true mockVMClient.EXPECT().ListVmssFlexVMsWithoutInstanceView(gomock.Any(), "test-asg").Return(expectedVMs, nil).AnyTimes() } @@ -1057,7 +1057,7 @@ func TestScaleSetNodes(t *testing.T) { } -func TestEnableVmssFlexFlag(t *testing.T) { +func TestEnableVmssFlexNodesFlag(t *testing.T) { // flag set to false ctrl := gomock.NewController(t) @@ -1069,7 +1069,7 @@ func TestEnableVmssFlexFlag(t *testing.T) { provider := newTestProvider(t) mockVMSSClient := mockvmssclient.NewMockInterface(ctrl) mockVMSSClient.EXPECT().List(gomock.Any(), provider.azureManager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes() - provider.azureManager.config.EnableVmssFlex = false + provider.azureManager.config.EnableVmssFlexNodes = false provider.azureManager.azClient.virtualMachineScaleSetsClient = mockVMSSClient mockVMClient := mockvmclient.NewMockInterface(ctrl) @@ -1084,7 +1084,7 @@ func TestEnableVmssFlexFlag(t *testing.T) { assert.Error(t, err, "vmss - \"test-asg\" with Flexible orchestration detected but 'enbaleVmssFlex' feature flag is turned off") // flag set to true - 
provider.azureManager.config.EnableVmssFlex = true + provider.azureManager.config.EnableVmssFlexNodes = true err = provider.azureManager.Refresh() assert.NoError(t, err) } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util.go b/cluster-autoscaler/cloudprovider/azure/azure_util.go index 9f4a44284ed2..44a87980571c 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_util.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_util.go @@ -636,3 +636,21 @@ func vmPowerStateFromStatuses(statuses []compute.InstanceViewStatus) string { // PowerState is not set if the VM is still creating (or has failed creation) return vmPowerStateUnknown } + +// strconv.ParseInt, but for int +func parseInt32(s string, base int) (int, error) { + val, err := strconv.ParseInt(s, base, 32) + if err != nil { + return 0, err + } + return int(val), nil +} + +// strconv.ParseFloat, but for float32 +func parseFloat32(s string) (float32, error) { + val, err := strconv.ParseFloat(s, 32) + if err != nil { + return 0, err + } + return float32(val), nil +} diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index c7e4d065cac9..f10d0740196c 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -6,12 +6,12 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go-extensions v0.1.6 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0-beta.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.9.0-beta.1 github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/adal v0.9.23 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 + github.com/Azure/go-autorest/autorest/adal v0.9.24 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 github.com/Azure/go-autorest/autorest/date v0.3.0 github.com/Azure/go-autorest/autorest/to v0.4.0 github.com/Azure/skewer v0.0.14 @@ -22,20 +22,20 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/go-querystring v1.0.0 - github.com/google/uuid v1.5.0 + github.com/google/uuid v1.6.0 github.com/jmespath/go-jmespath v0.4.0 github.com/json-iterator/go v1.1.12 - github.com/onsi/ginkgo/v2 v2.13.2 - github.com/onsi/gomega v1.30.0 + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.16.0 github.com/satori/go.uuid v1.2.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.4.0 - golang.org/x/net v0.23.0 + golang.org/x/net v0.24.0 golang.org/x/oauth2 v0.11.0 - golang.org/x/sys v0.18.0 + golang.org/x/sys v0.19.0 google.golang.org/api v0.126.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.33.0 @@ -55,7 +55,8 @@ require ( k8s.io/kubernetes v1.29.6 k8s.io/legacy-cloud-providers v0.0.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - 
sigs.k8s.io/cloud-provider-azure v1.29.0 + sigs.k8s.io/cloud-provider-azure v1.29.4 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 sigs.k8s.io/yaml v1.4.0 ) @@ -63,7 +64,9 @@ require ( require ( cloud.google.com/go/compute v1.23.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect @@ -72,12 +75,12 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 // indirect github.com/Azure/go-armbalancer v0.0.2 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect github.com/Microsoft/go-winio v0.6.0 // indirect @@ -104,7 +107,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect - github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -117,7 +120,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/cadvisor v0.48.1 // indirect @@ -153,7 +156,7 @@ require ( github.com/opencontainers/runc v1.1.10 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect github.com/opencontainers/selinux v1.11.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + 
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect @@ -163,7 +166,7 @@ require ( github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/vishvananda/netlink v1.1.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect @@ -185,14 +188,14 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.18.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.17.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect @@ -213,8 +216,7 @@ require ( k8s.io/kubectl v0.28.0 // indirect k8s.io/mount-utils v0.26.0-alpha.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231205023417-1ba5a224ab0e // indirect + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect ) diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index ad285637f1c3..38231df3e2f6 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -53,20 +53,24 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go-extensions v0.1.6 h1:EXGvDcj54u98XfaI/Cy65Ds6vNsIJeGKYf0eNLB1y4Q= github.com/Azure/azure-sdk-for-go-extensions v0.1.6/go.mod h1:27StPiXJp6Xzkq2AQL7gPK7VC0hgmCnUKlco1dO1jaM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 
h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 h1:QfV5XZt6iNa2aWMAt96CZEbfJ7kgG/qYIpq465Shr5E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0 h1:1u/K2BFv0MwkG6he8RYuUcbbeK22rkoZbg4lKa/msZU= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.4.0/go.mod h1:U5gpsREQZE6SLk1t/cFfc1eMhYAlYpEzvaYXuDfefy8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0-beta.1 h1:6RFNcR7iE8Ka8j76gE0a/b28eAX6AZF4zqSw0XnFWbg= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0-beta.1/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.9.0-beta.1 h1:iqhrjj9w9/AQZsHjaOVyloamkeAFRbWI0iHNy6INMYk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.9.0-beta.1/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 h1:HlZMUZW8S4P9oob1nCHxCCKrytxyLc+24nUJGssoEto= @@ -86,20 +90,19 @@ github.com/Azure/go-armbalancer v0.0.2/go.mod h1:yTg7MA/8YnfKQc9o97tzAJ7fbdVkod1 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -118,8 +121,8 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/skewer v0.0.14 h1:0mzUJhspECkajYyynYsOCp//E2PSnYXrgP45bcskqfQ= github.com/Azure/skewer v0.0.14/go.mod h1:6WTecuPyfGtuvS8Mh4JYWuHhO4kcWycGfsUBB+XTFG4= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod 
h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b h1:Heo1J/ttaQFgGJSVnCZquy3e5eH5j1nqxBuomztB3P0= @@ -219,7 +222,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.27.0 h1:78iE9oVvTnAEqhMip2UHFvL01b8LJcydbNUpr0cAmN4= github.com/digitalocean/godo v1.27.0/go.mod h1:iJnN9rVu6K5LioLxLimlq0uRI+y/eAQjROUmeU/r0hY= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= @@ -252,11 +254,10 @@ github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBF github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= @@ -301,10 +302,11 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 
h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= @@ -399,8 +401,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -499,10 +501,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= -github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -516,8 +518,8 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.m 
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -581,8 +583,9 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -593,8 +596,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -691,13 +694,12 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -736,6 +738,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -780,8 +783,9 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -813,8 +817,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -864,7 +869,6 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -876,14 +880,18 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -895,6 +903,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -960,8 +969,9 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1119,8 +1129,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/dnaeon/go-vcr.v3 v3.1.2 h1:F1smfXBqQqwpVifDfUBQG6zzaGjzT+EnVZakrOdr5wA= -gopkg.in/dnaeon/go-vcr.v3 v3.1.2/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0 h1:Rltp0Vf+Aq0u4rQXgmXgtgoRDStTnFN83cWgSGSoRzM= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -1207,12 +1217,12 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/cloud-provider-azure v1.29.0 h1:lHk6AB+3XfURM7bbR+uABKeRcMC1TYreWA6GM5wUT6g= -sigs.k8s.io/cloud-provider-azure v1.29.0/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b 
h1:onCsa2FoC9HGIgW+eQYJI8/IZnefwCcU9rF7ZKtD7f0= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b/go.mod h1:seH99Elt7KgWOOonCmzRcB1yLouqK7B7+l8RoSbqaYE= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231205023417-1ba5a224ab0e h1:WjkP0sFCicdlRoTUwJZ0Nm72fZApS/vnAePc+Y7R364= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231205023417-1ba5a224ab0e/go.mod h1:dDc0Ixf5VI01TkTj83ENW1hH5jImGJsdKhQgFRyQsyA= +sigs.k8s.io/cloud-provider-azure v1.29.4 h1:lW/mqq9fofs52/T+Crs6JNzzEhz0NjzQUtSXMseh67M= +sigs.k8s.io/cloud-provider-azure v1.29.4/go.mod h1:73KgMVXVtMqG/JlhRRezonfirvoO2ldsxC6H+1FKEPg= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 h1:dxpo41/N6m2R//9fmqKgqYZL2k0rQSE1NvQteIQ9pGA= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13/go.mod h1:tN2BDTM6RDyQsae6JRvaaA14LVxDsRaLU3Ea2MRUBjg= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 h1:CFMHYo6/OQpLTycJGQIze2pchNeJQ7L2TQC6fDo4JGY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4/go.mod h1:PvXgFxPcfve6yBiWNIO/fqAMvGVC9W7qN6M2vIj4zmY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 7a0a524e3321..a6675492b1a6 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,46 @@ # Release History +## 1.11.1 (2024-04-02) + +### Bugs Fixed + +* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure. +* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal. + +## 1.11.0 (2024-04-01) + +### Features Added + +* Added `StatusCodes` to `arm/policy.RegistrationOptions` to allow supporting non-standard HTTP status codes during registration. +* Added field `InsecureAllowCredentialWithHTTP` to `azcore.ClientOptions` and dependent authentication pipeline policies. +* Added type `MultipartContent` to the `streaming` package to support multipart/form payloads with custom Content-Type and file name. + +### Bugs Fixed + +* `runtime.SetMultipartFormData` won't try to stringify `[]byte` values. +* Pollers that use the `Location` header won't consider `http.StatusTooManyRequests` a terminal failure. + +### Other Changes + +* Update dependencies. + +## 1.10.0 (2024-02-29) + +### Features Added + +* Added logging event `log.EventResponseError` that will contain the contents of `ResponseError.Error()` whenever an `azcore.ResponseError` is created. +* Added `runtime.NewResponseErrorWithErrorCode` for creating an `azcore.ResponseError` with a caller-supplied error code. +* Added type `MatchConditions` for use in conditional requests. + +### Bugs Fixed + +* Fixed a potential race condition between `NullValue` and `IsNullValue`. +* `runtime.EncodeQueryParams` will escape semicolons before calling `url.ParseQuery`. + +### Other Changes + +* Update dependencies. 
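The 1.11.0 changelog entry above introduces `StatusCodes` on `arm/policy.RegistrationOptions`, and the vendored `policy_register_rp.go` hunk later in this diff shows it defaulting to `http.StatusConflict` and being consulted via `runtime.HasStatusCode`. As a rough illustration of what the new knob allows (only the two option fields come from this diff; the standalone program and the idea of also treating a 400 as "provider not registered" are assumptions for illustration), a caller could widen the set of responses that trigger resource-provider registration:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
)

func main() {
	// StatusCodes and PollingDuration are the fields shown in this diff; the
	// extra http.StatusBadRequest entry is purely hypothetical, for services
	// that signal an unregistered provider with something other than 409.
	opts := armpolicy.RegistrationOptions{
		StatusCodes:     []int{http.StatusConflict, http.StatusBadRequest},
		PollingDuration: 5 * time.Minute,
	}
	fmt.Printf("RP registration retried on %v for up to %s\n", opts.StatusCodes, opts.PollingDuration)
}
```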
+ ## 1.9.2 (2024-02-06) ### Bugs Fixed diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go index 83cf91e3ecb5..f18caf848935 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go @@ -20,6 +20,11 @@ type BearerTokenOptions struct { // policy's credential must support multitenant authentication. AuxiliaryTenants []string + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Scopes contains the list of permission scopes required for the token. Scopes []string } @@ -44,6 +49,11 @@ type RegistrationOptions struct { // The default valule is 5 minutes. // NOTE: Setting this to a small value might cause the policy to prematurely fail. PollingDuration time.Duration + + // StatusCodes contains the slice of custom HTTP status codes to use instead + // of the default http.StatusConflict. This should only be set if a service + // returns a non-standard HTTP status code when unregistered. + StatusCodes []int } // ClientOptions contains configuration settings for a client's pipeline. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go index 302c19cd4265..039b758bf988 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -30,8 +30,9 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr return azruntime.Pipeline{}, err } authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{ - AuxiliaryTenants: options.AuxiliaryTenants, - Scopes: []string{conf.Audience + "/.default"}, + AuxiliaryTenants: options.AuxiliaryTenants, + InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, + Scopes: []string{conf.Audience + "/.default"}, }) perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go index 54b3bb78d859..765fbc6843da 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -64,6 +64,7 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.scopes = make([]string, len(opts.Scopes)) copy(p.scopes, opts.Scopes) p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ + InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, AuthorizationHandler: azpolicy.AuthorizationHandler{ 
OnChallenge: p.onChallenge, OnRequest: p.onRequest, diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go index 83e15949aa36..810ac9d9fabf 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go @@ -8,7 +8,6 @@ package runtime import ( "context" - "errors" "fmt" "net/http" "net/url" @@ -16,6 +15,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -45,6 +45,9 @@ func setDefaults(r *armpolicy.RegistrationOptions) { if r.PollingDuration == 0 { r.PollingDuration = 5 * time.Minute } + if len(r.StatusCodes) == 0 { + r.StatusCodes = []int{http.StatusConflict} + } } // NewRPRegistrationPolicy creates a policy object configured using the specified options. @@ -88,7 +91,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) // make the original request resp, err = req.Next() // getting a 409 is the first indication that the RP might need to be registered, check error response - if err != nil || resp.StatusCode != http.StatusConflict { + if err != nil || !runtime.HasStatusCode(resp, r.options.StatusCodes...) { return resp, err } var reqErr requestError @@ -105,17 +108,12 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) // to the caller so its error unmarshalling will kick in return resp, err } - // RP needs to be registered. 
start by getting the subscription ID from the original request - subID, err := getSubscription(req.Raw().URL.Path) - if err != nil { - return resp, err - } - // now get the RP from the error - rp, err = getProvider(reqErr) + res, err := resource.ParseResourceID(req.Raw().URL.Path) if err != nil { return resp, err } - logRegistrationExit := func(v interface{}) { + rp = res.ResourceType.Namespace + logRegistrationExit := func(v any) { log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v) } log.Writef(LogRPRegistration, "BEGIN registration for %s", rp) @@ -124,7 +122,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) rpOps := &providersOperations{ p: r.pipeline, u: r.endpoint, - subID: subID, + subID: res.SubscriptionID, } if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil { logRegistrationExit(err) @@ -189,36 +187,13 @@ func isUnregisteredRPCode(errorCode string) bool { return false } -func getSubscription(path string) (string, error) { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1], nil - } - } - return "", fmt.Errorf("failed to obtain subscription ID from %s", path) -} - -func getProvider(re requestError) (string, error) { - if len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0].Target, nil - } - return "", errors.New("unexpected empty Details") -} - // minimal error definitions to simplify detection type requestError struct { ServiceError *serviceError `json:"error"` } type serviceError struct { - Code string `json:"code"` - Details []serviceErrorDetails `json:"details"` -} - -type serviceErrorDetails struct { - Code string `json:"code"` - Target string `json:"target"` + Code string `json:"code"` } /////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml index aab9218538da..99348527b541 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -23,7 +23,7 @@ pr: - sdk/azcore/ - eng/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: azcore diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 8eef8633a7e8..9d1c2f0c0537 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -8,6 +8,7 @@ package azcore import ( "reflect" + "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -41,13 +42,28 @@ func NewSASCredential(sas string) *SASCredential { } // holds sentinel values used to send nulls -var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} +var nullables map[reflect.Type]any = map[reflect.Type]any{} +var nullablesMu sync.RWMutex // NullValue is used to send an explicit 'null' within a request. 
// This is typically used in JSON-MERGE-PATCH operations to delete a value. func NullValue[T any]() T { t := shared.TypeOfT[T]() + + nullablesMu.RLock() v, found := nullables[t] + nullablesMu.RUnlock() + + if found { + // return the sentinel object + return v.(T) + } + + // promote to exclusive lock and check again (double-checked locking pattern) + nullablesMu.Lock() + defer nullablesMu.Unlock() + v, found = nullables[t] + if !found { var o reflect.Value if k := t.Kind(); k == reflect.Map { @@ -72,6 +88,9 @@ func NullValue[T any]() T { func IsNullValue[T any](v T) bool { // see if our map has a sentinel object for this *T t := reflect.TypeOf(v) + nullablesMu.RLock() + defer nullablesMu.RUnlock() + if o, found := nullables[t]; found { o1 := reflect.ValueOf(o) v1 := reflect.ValueOf(v) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go index 23ea7e7c8eac..2b19d01f76ec 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -46,3 +46,12 @@ func (e ETag) WeakEquals(other ETag) bool { func (e ETag) IsWeak() bool { return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") } + +// MatchConditions specifies HTTP options for conditional requests. +type MatchConditions struct { + // Optionally limit requests to resources that have a matching ETag. + IfMatch *ETag + + // Optionally limit requests to resources that do not match the ETag. + IfNoneMatch *ETag +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 8d1ae213c958..3041984d9b1f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -51,15 +51,15 @@ type Request struct { values opValues } -type opValues map[reflect.Type]interface{} +type opValues map[reflect.Type]any // Set adds/changes a value -func (ov opValues) set(value interface{}) { +func (ov opValues) set(value any) { ov[reflect.TypeOf(value)] = value } // Get looks for a value set by SetValue first -func (ov opValues) get(value interface{}) bool { +func (ov opValues) get(value any) bool { v, ok := ov[reflect.ValueOf(value).Elem().Type()] if ok { reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) @@ -108,7 +108,7 @@ func (req *Request) Next() (*http.Response, error) { } // SetOperationValue adds/changes a mutable key/value associated with a single operation. -func (req *Request) SetOperationValue(value interface{}) { +func (req *Request) SetOperationValue(value any) { if req.values == nil { req.values = opValues{} } @@ -116,7 +116,7 @@ func (req *Request) SetOperationValue(value interface{}) { } // OperationValue looks for a value set by SetOperationValue(). 
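A hedged aside on the azcore/core.go hunk above (illustrative only, not part of this patch): the new `sync.RWMutex` makes the sentinel map behind `NullValue`/`IsNullValue` safe for concurrent use via double-checked locking. The sketch below shows how the sentinel is typically consumed; the `Widget` type is hypothetical, and it is the SDK's generated marshallers (not plain `encoding/json`) that translate the sentinel into an explicit JSON `null`.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// Widget is a hypothetical JSON merge-patch payload.
type Widget struct {
	Description *string `json:"description,omitempty"`
}

func main() {
	w := Widget{
		// ask the service to delete the value; generated marshallers detect
		// this sentinel via azcore.IsNullValue and emit "description": null
		Description: azcore.NullValue[*string](),
	}
	fmt.Println(azcore.IsNullValue(w.Description)) // true
}
```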
-func (req *Request) OperationValue(value interface{}) bool { +func (req *Request) OperationValue(value any) bool { if req.values == nil { return false } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index f243552885d1..08a95458730b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -20,36 +21,45 @@ import ( // NewResponseError creates a new *ResponseError from the provided HTTP response. // Exported as runtime.NewResponseError(). func NewResponseError(resp *http.Response) error { - respErr := &ResponseError{ - StatusCode: resp.StatusCode, - RawResponse: resp, - } - // prefer the error code in the response header if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { - respErr.ErrorCode = ec - return respErr + return NewResponseErrorWithErrorCode(resp, ec) } // if we didn't get x-ms-error-code, check in the response body body, err := exported.Payload(resp, nil) if err != nil { + // since we're not returning the ResponseError in this + // case we also don't want to write it to the log. return err } + var errorCode string if len(body) > 0 { - if code := extractErrorCodeJSON(body); code != "" { - respErr.ErrorCode = code - } else if code := extractErrorCodeXML(body); code != "" { - respErr.ErrorCode = code + if fromJSON := extractErrorCodeJSON(body); fromJSON != "" { + errorCode = fromJSON + } else if fromXML := extractErrorCodeXML(body); fromXML != "" { + errorCode = fromXML } } + return NewResponseErrorWithErrorCode(resp, errorCode) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Exported as runtime.NewResponseErrorWithErrorCode(). +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + respErr := &ResponseError{ + ErrorCode: errorCode, + StatusCode: resp.StatusCode, + RawResponse: resp, + } + log.Write(log.EventResponseError, respErr.Error()) return respErr } func extractErrorCodeJSON(body []byte) string { - var rawObj map[string]interface{} + var rawObj map[string]any if err := json.Unmarshal(body, &rawObj); err != nil { // not a JSON object return "" @@ -58,7 +68,7 @@ func extractErrorCodeJSON(body []byte) string { // check if this is a wrapped error, i.e. { "error": { ... 
} } // if so then unwrap it if wrapped, ok := rawObj["error"]; ok { - unwrapped, ok := wrapped.(map[string]interface{}) + unwrapped, ok := wrapped.(map[string]any) if !ok { return "" } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go index 0684cb317390..6fc6d1400e7f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -15,24 +15,36 @@ import ( type Event = log.Event const ( - EventRequest = azlog.EventRequest - EventResponse = azlog.EventResponse - EventRetryPolicy = azlog.EventRetryPolicy - EventLRO = azlog.EventLRO + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventResponseError = azlog.EventResponseError + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO ) +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. func Write(cls log.Event, msg string) { log.Write(cls, msg) } -func Writef(cls log.Event, format string, a ...interface{}) { +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls log.Event, format string, a ...any) { log.Writef(cls, format, a...) } +// SetListener will set the Logger to write to the specified listener. func SetListener(lst func(Event, string)) { log.SetListener(lst) } +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. func Should(cls log.Event) bool { return log.Should(cls) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index b05bd8b38d2b..ccd4794e9e9b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -27,7 +27,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. 
-func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["asyncURL"] return ok } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 2bb9e105b666..0d781b31d0c7 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -29,7 +29,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { t, ok := token["type"] if !ok { return false diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go index 25983471867b..51aede8a2b8f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -26,7 +26,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["fakeURL"] return ok } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index d6be89876aba..7a56c5211b71 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -28,7 +28,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { t, ok := token["type"] if !ok { return false @@ -103,6 +103,10 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } else if resp.StatusCode > 199 && resp.StatusCode < 300 { // any 2xx other than a 202 indicates success p.CurState = poller.StatusSucceeded + } else if pollers.IsNonTerminalHTTPStatusCode(resp) { + // the request timed out or is being throttled. + // DO NOT include this as a terminal failure. preserve + // the existing state and return the response. 
} else { p.CurState = poller.StatusFailed } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index 1bc7ad0acedb..ac1c0efb5acf 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -25,7 +25,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["oplocURL"] return ok } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index d8d86a46c2de..eb3cf651db03 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -74,7 +74,7 @@ func ExtractToken(token string) ([]byte, error) { // IsTokenValid returns an error if the specified token isn't applicable for generic type T. func IsTokenValid[T any](token string) error { - raw := map[string]interface{}{} + raw := map[string]any{} if err := json.Unmarshal([]byte(token), &raw); err != nil { return err } @@ -185,3 +185,16 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { } return nil } + +// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be +// considered non-terminal thus eligible for retry. +func IsNonTerminalHTTPStatusCode(resp *http.Response) bool { + return exported.HasStatusCode(resp, + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + ) +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 8f749f48d9bf..03691cbf024c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.9.2" + Version = "v1.11.1" ) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go index 7bde29d0a462..f260dac3637c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -23,6 +23,11 @@ const ( // This includes information like the HTTP status code, headers, and request URL. 
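A hedged aside on the log hunks around this point (illustrative only, not part of this patch): the `EventResponseError` classification added just below surfaces the contents of `*azcore.ResponseError` through the azcore log. A minimal sketch of wiring up a listener; the chosen event filter and the stdout destination are assumptions for illustration.

```go
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// route SDK log output somewhere useful (stdout here, purely for illustration)
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", ev, msg)
	})
	// limit output to the events of interest, including the new response-error event
	azlog.SetEvents(azlog.EventRetryPolicy, azlog.EventResponseError)
}
```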
EventResponse Event = "Response" + // EventResponseError entries contain information about HTTP responses that returned + // an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code). + // This includes the contents of ResponseError.Error(). + EventResponseError Event = "ResponseError" + // EventRetryPolicy entries contain information specific to the retry policy in use. EventRetryPolicy Event = "Retry" diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index d934f1dc5fa3..8d984535887e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -39,6 +39,11 @@ type ClientOptions struct { // Cloud specifies a cloud for the client. The default is Azure Public Cloud. Cloud cloud.Configuration + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the credential in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Logging configures the built-in logging policy. Logging LogOptions @@ -147,6 +152,11 @@ type BearerTokenOptions struct { // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from // its given credential. AuthorizationHandler AuthorizationHandler + + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool } // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go index 6d03b291ebff..c0d56158e229 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -14,6 +14,14 @@ import ( // NewResponseError creates an *azcore.ResponseError from the provided HTTP response. // Call this when a service request returns a non-successful status code. +// The error code will be extracted from the *http.Response, either from the x-ms-error-code +// header (preferred) or attempted to be parsed from the response body. func NewResponseError(resp *http.Response) error { return exported.NewResponseError(resp) } + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Use this variant when the error code is in a non-standard location. 
+func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + return exported.NewResponseErrorWithErrorCode(resp, errorCode) +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index f0f280355955..cb2a6952805d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -24,6 +24,7 @@ type BearerTokenPolicy struct { authzHandler policy.AuthorizationHandler cred exported.TokenCredential scopes []string + allowHTTP bool } type acquiringResourceState struct { @@ -55,6 +56,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), + allowHTTP: opts.InsecureAllowCredentialWithHTTP, } } @@ -80,7 +82,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return req.Next() } - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil { return nil, err } @@ -113,8 +115,8 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return res, err } -func checkHTTPSForAuth(req *policy.Request) error { - if strings.ToLower(req.Raw().URL.Scheme) != "https" { +func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP { return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) } return nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go index 6f577fa7a9e4..eeb1c09cc122 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -12,13 +12,19 @@ import ( // KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. type KeyCredentialPolicy struct { - cred *exported.KeyCredential - header string - prefix string + cred *exported.KeyCredential + header string + prefix string + allowHTTP bool } // KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. type KeyCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. 
Prefix string } @@ -32,9 +38,10 @@ func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options options = &KeyCredentialPolicyOptions{} } return &KeyCredentialPolicy{ - cred: cred, - header: header, - prefix: options.Prefix, + cred: cred, + header: header, + prefix: options.Prefix, + allowHTTP: options.InsecureAllowCredentialWithHTTP, } } @@ -44,7 +51,7 @@ func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { // this prevents a panic that might be hard to diagnose and allows testing // against http endpoints that don't require authentication. if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { return nil, err } val := exported.KeyCredentialGet(k.cred) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go index ebe2b7772ba7..3964beea8623 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go @@ -12,13 +12,17 @@ import ( // SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. type SASCredentialPolicy struct { - cred *exported.SASCredential - header string + cred *exported.SASCredential + header string + allowHTTP bool } // SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. type SASCredentialPolicyOptions struct { - // placeholder for future optional values + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool } // NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. @@ -26,9 +30,13 @@ type SASCredentialPolicyOptions struct { // - header is the name of the HTTP request header in which the shared access signature is placed // - options contains optional configuration, pass nil to accept the default values func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { + if options == nil { + options = &SASCredentialPolicyOptions{} + } return &SASCredentialPolicy{ - cred: cred, - header: header, + cred: cred, + header: header, + allowHTTP: options.InsecureAllowCredentialWithHTTP, } } @@ -38,7 +46,7 @@ func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { // this prevents a panic that might be hard to diagnose and allows testing // against http endpoints that don't require authentication. 
if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { return nil, err } req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index c373f68962e3..03f76c9aa8ef 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -154,7 +154,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options if err != nil { return nil, err } - var asJSON map[string]interface{} + var asJSON map[string]any if err := json.Unmarshal(raw, &asJSON); err != nil { return nil, err } @@ -240,7 +240,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } start := time.Now() - logPollUntilDoneExit := func(v interface{}) { + logPollUntilDoneExit := func(v any) { log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) } log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) @@ -334,6 +334,11 @@ func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { err = p.op.Result(ctx, p.result) var respErr *exported.ResponseError if errors.As(err, &respErr) { + if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) { + // the request failed in a non-terminal way. + // don't cache the error or mark the Poller as done + return + } // the LRO failed. record the error p.err = err } else if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 5d1569c8dd2b..06ac95b1b718 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -11,9 +11,11 @@ import ( "context" "encoding/json" "encoding/xml" + "errors" "fmt" "io" "mime/multipart" + "net/textproto" "net/url" "path" "strings" @@ -21,6 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" ) // Base64Encoding is usesd to specify which base-64 encoder/decoder to use when @@ -42,12 +45,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic } // EncodeQueryParams will parse and encode any query parameters in the specified URL. +// Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { before, after, found := strings.Cut(u, "?") if !found { return u, nil } - qp, err := url.ParseQuery(after) + // starting in Go 1.17, url.ParseQuery will reject semicolons in query params. + // so, we must escape them first. note that this assumes that semicolons aren't + // being used as query param separators which is per the current RFC. 
+ // for more info: + // https://github.com/golang/go/issues/25192 + // https://github.com/golang/go/issues/50034 + qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B")) if err != nil { return "", err } @@ -102,7 +112,7 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er } // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. -func MarshalAsJSON(req *policy.Request, v interface{}) error { +func MarshalAsJSON(req *policy.Request, v any) error { b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -112,7 +122,7 @@ func MarshalAsJSON(req *policy.Request, v interface{}) error { } // MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. -func MarshalAsXML(req *policy.Request, v interface{}) error { +func MarshalAsXML(req *policy.Request, v any) error { b, err := xml.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -122,10 +132,10 @@ func MarshalAsXML(req *policy.Request, v interface{}) error { return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) } -// SetMultipartFormData writes the specified keys/values as multi-part form -// fields with the specified value. File content must be specified as a ReadSeekCloser. -// All other values are treated as string values. -func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { +// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value. +// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent]. +// Byte slices will be treated as JSON. All other values are treated as string values. 
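A hedged aside on the reworked `SetMultipartFormData` declared just below (illustrative only, not part of this patch): parts can now be supplied as `streaming.MultipartContent` to control the per-part Content-Type and filename. The endpoint, field names, and payloads in this sketch are placeholders.

```go
package main

import (
	"bytes"
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

// buildUploadRequest sketches a multipart/form-data request with mixed part types.
func buildUploadRequest(ctx context.Context, png, metaJSON []byte) error {
	req, err := runtime.NewRequest(ctx, http.MethodPost, "https://example.com/upload")
	if err != nil {
		return err
	}
	return runtime.SetMultipartFormData(req, map[string]any{
		// file part with an explicit Content-Type and filename
		"file": streaming.MultipartContent{
			Body:        streaming.NopCloser(bytes.NewReader(png)),
			ContentType: "image/png",
			Filename:    "picture.png",
		},
		"metadata": metaJSON, // []byte is written as application/json
		"note":     "hello",  // strings are written as text/plain
	})
}
```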
+func SetMultipartFormData(req *policy.Request, formData map[string]any) error { body := bytes.Buffer{} writer := multipart.NewWriter(&body) @@ -141,6 +151,60 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) return nil } + quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + + writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error { + if mpc.Body == nil { + return errors.New("streaming.MultipartContent.Body cannot be nil") + } + + // use fieldname for the file name when unspecified + filename := fieldname + + if mpc.ContentType == "" && mpc.Filename == "" { + return writeContent(fieldname, filename, mpc.Body) + } + if mpc.Filename != "" { + filename = mpc.Filename + } + // this is pretty much copied from multipart.Writer.CreateFormFile + // but lets us set the caller provided Content-Type and filename + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename))) + contentType := "application/octet-stream" + if mpc.ContentType != "" { + contentType = mpc.ContentType + } + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, mpc.Body); err != nil { + return err + } + return nil + } + + // the same as multipart.Writer.WriteField but lets us specify the Content-Type + writeField := func(fieldname, contentType string, value string) error { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname))) + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + if _, err = fd.Write([]byte(value)); err != nil { + return err + } + return nil + } + for k, v := range formData { if rsc, ok := v.(io.ReadSeekCloser); ok { if err := writeContent(k, k, rsc); err != nil { @@ -154,13 +218,35 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) } } continue + } else if mpc, ok := v.(streaming.MultipartContent); ok { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + continue + } else if mpcs, ok := v.([]streaming.MultipartContent); ok { + for _, mpc := range mpcs { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + } + continue } - // ensure the value is in string format - s, ok := v.(string) - if !ok { - s = fmt.Sprintf("%v", v) + + var content string + contentType := shared.ContentTypeTextPlain + switch tt := v.(type) { + case []byte: + // JSON, don't quote it + content = string(tt) + contentType = shared.ContentTypeAppJSON + case string: + content = tt + default: + // ensure the value is in string format + content = fmt.Sprintf("%v", v) } - if err := writer.WriteField(k, s); err != nil { + + if err := writeField(k, contentType, content); err != nil { return err } } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index 003c875b1f56..048566e02c06 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -40,7 +40,7 @@ func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format 
Base64Encoding) } // UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsJSON(resp *http.Response, v interface{}) error { +func UnmarshalAsJSON(resp *http.Response, v any) error { payload, err := Payload(resp) if err != nil { return err @@ -61,7 +61,7 @@ func UnmarshalAsJSON(resp *http.Response, v interface{}) error { } // UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsXML(resp *http.Response, v interface{}) error { +func UnmarshalAsXML(resp *http.Response, v any) error { payload, err := Payload(resp) if err != nil { return err diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go index fbcd48311b85..2468540bd757 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -73,3 +73,17 @@ func (p *progress) Seek(offset int64, whence int) (int64, error) { func (p *progress) Close() error { return p.rc.Close() } + +// MultipartContent contains streaming content used in multipart/form payloads. +type MultipartContent struct { + // Body contains the required content body. + Body io.ReadSeekCloser + + // ContentType optionally specifies the HTTP Content-Type for this Body. + // The default value is application/octet-stream. + ContentType string + + // Filename optionally specifies the filename for this Body. + // The default value is the field name for the multipart/form section. + Filename string +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 7ea119ab30dd..f6749c030590 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,47 @@ # Release History +## 1.5.2 (2024-04-09) + +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + +### Other Changes +* Restored v1.4.0 error behavior for empty tenant IDs +* Upgraded dependencies + +## 1.5.1 (2024-01-17) + +### Bugs Fixed +* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly + +## 1.5.0 (2024-01-16) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +* Removed persistent token caching. It will return in v1.6.0-beta.1 + +### Bugs Fixed +* Credentials now preserve MSAL headers e.g. X-Client-Sku + +### Other Changes +* Upgraded dependencies + +## 1.5.0-beta.2 (2023-11-07) + +### Features Added +* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity +* Added spans for distributed tracing. + +## 1.5.0-beta.1 (2023-10-10) + +### Features Added +* Optional persistent token caching for most credentials. Set `TokenCachePersistenceOptions` + on a credential's options to enable and configure this. See the package documentation for + this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more + details. 
+* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This + credential is also part of the `DefaultAzureCredential` authentication flow. + ## 1.4.0 (2023-10-10) ### Bugs Fixed @@ -94,14 +136,14 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). - This indicates to Azure Active Directory that your application can handle CAE claims challenges. + This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". * `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) * Service principal and user credentials support ADFS authentication on Azure Stack. Specify "adfs" as the credential's tenant. * Applications running in private or disconnected clouds can prevent credentials from - requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery` field on credential options. * Many credentials can now be configured to authenticate in multiple tenants. The options types for these credentials have an `AdditionallyAllowedTenants` field @@ -454,4 +496,4 @@ ## 0.1.0 (2020-07-23) ### Features Added -* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. +* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 4ac53eb7b276..1a649202303c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. 
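A hedged sketch of the kind of migration the guide above describes (illustrative only, not part of this patch): a service principal secret flow in `azidentity`, requesting an ARM-scoped token where `autorest/adal` would have used a resource ID. The tenant ID, client ID, and secret are placeholders to be supplied from configuration.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// the azidentity counterpart of an adal service principal token provider
	cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", nil)
	if err != nil {
		log.Fatal(err)
	}
	tok, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		// an OAuth 2 scope replaces the adal resource ID "https://management.azure.com/"
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.ExpiresOn)
}
```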
@@ -18,7 +18,7 @@ This guide shows common authentication code using `autorest/adal` and its equiva ### `autorest/adal` -Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant: ```go import "github.com/Azure/go-autorest/autorest/adal" @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). ## Use azidentity credentials with older packages diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index da0baa9add3d..b6ad2d39f840 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. [![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -35,6 +35,12 @@ signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in t When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. +#### Authenticate via the Azure Developer CLI + +Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally. + +To authenticate with the Azure Developer CLI, run `azd auth login`. 
On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow. + ## Key concepts ### Credentials @@ -44,9 +50,7 @@ service client to authenticate requests. Service clients across the Azure SDK accept a credential instance when they are constructed, and use that credential to authenticate requests. -The `azidentity` module focuses on OAuth authentication with Azure Active -Directory (AAD). It offers a variety of credential types capable of acquiring -an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. +The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. ### DefaultAzureCredential @@ -58,6 +62,7 @@ an Azure AD access token. See [Credential Types](#credential-types "Credential T 1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. 1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. 1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. +1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. > Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. 
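A hedged sketch of the `DefaultAzureCredential` chain the README describes (illustrative only, not part of this patch): create the credential once and hand it to any SDK client that accepts a `TokenCredential`. The subscription ID is a placeholder and error handling is abbreviated.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
)

func main() {
	// tries environment, workload identity, managed identity, Azure CLI,
	// then Azure Developer CLI, in that order
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armresources.NewResourceGroupsClient("<subscription ID>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual; requests authenticate with cred
}
```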
@@ -152,6 +157,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- |[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI +|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI ## Environment Variables @@ -161,16 +167,16 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application -|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_SECRET`|one of the application's client secrets #### Service principal with certificate |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application -|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key |`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any @@ -178,22 +184,30 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application |`AZURE_USERNAME`|a username (usually an email address) |`AZURE_PASSWORD`|that user's password Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used. +## Token caching + +Token caching is an `azidentity` feature that allows apps to: + +* Cache tokens in memory (default) or on disk (opt-in). +* Improve resilience and performance. +* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. + +For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching). + ## Troubleshooting ### Error Handling Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). -For more details on handling specific Azure Active Directory errors please refer to the -Azure Active Directory -[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes). 
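A hedged sketch of the error handling the section above recommends (illustrative only, not part of this patch): unwrap `*azcore.ResponseError` to get the HTTP status and the service or Microsoft Entra error code. Here `err` stands for whatever a service client call returned.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// inspect classifies an error returned by an Azure SDK client call.
func inspect(err error) {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		// ErrorCode carries the service or Microsoft Entra error code when one was present
		fmt.Printf("status %d, code %q\n", respErr.StatusCode, respErr.ErrorCode)
		return
	}
	fmt.Println("not an HTTP response error:", err)
}

func main() {
	inspect(errors.New("placeholder")) // in practice err comes from a client call
}
```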
### Logging diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD new file mode 100644 index 000000000000..c0d6601469cc --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -0,0 +1,70 @@ +## Token caching in the Azure Identity client module + +*Token caching* is a feature provided by the Azure Identity library that allows apps to: + +- Improve their resilience and performance. +- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times the user is prompted to authenticate. + +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. + +Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. + +### In-memory token caching + +*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. + +**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. + +#### Caching cannot be disabled + +As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. + +### Persistent token caching + +> Only azidentity v1.5.0-beta versions support persistent token caching + +*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. 
+ +| Operating system | Storage mechanism | +|------------------|---------------------------------------| +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain | +| Windows | DPAPI | + +By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. +However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. + +With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: + +- Makes the app more resilient to failures. +- Ensures the app can continue to function during an Entra ID outage or disruption. +- Avoids having to prompt users to authenticate each time the process is restarted. + +>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. + +#### Example code + +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data. + +### Credentials supporting token caching + +The following table indicates the state of in-memory and persistent caching in each credential type. + +**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). 
+ +| Credential | In-memory token caching | Persistent token caching | +|--------------------------------|---------------------------------------------------------------------|--------------------------| +| `AzureCLICredential` | Not Supported | Not Supported | +| `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `ClientAssertionCredential` | Supported | Supported | +| `ClientCertificateCredential` | Supported | Supported | +| `ClientSecretCredential` | Supported | Supported | +| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported | +| `DeviceCodeCredential` | Supported | Supported | +| `EnvironmentCredential` | Supported | Not Supported | +| `InteractiveBrowserCredential` | Supported | Supported | +| `ManagedIdentityCredential` | Supported | Not Supported | +| `OnBehalfOfCredential` | Supported | Supported | +| `UsernamePasswordCredential` | Supported | Supported | +| `WorkloadIdentityCredential` | Supported | Supported | diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index fef099813c87..832c599eb90a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,7 +8,8 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) -- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -23,7 +24,7 @@ This troubleshooting guide covers failure investigation techniques, common error ## Handle azidentity errors -Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable. 
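For illustration, a short sketch (not from this change) of inspecting such a failure with `errors.As`. It assumes the exported `azidentity.AuthenticationFailedError` type, whose message carries the Microsoft Entra ID error code, correlation ID, and timestamp described in the sections below.

```go
package example

import (
	"errors"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// inspectAuthError is a hypothetical helper: it reports whether err came from a
// failed token request and, if so, returns the raw Microsoft Entra ID response.
func inspectAuthError(err error) (*http.Response, bool) {
	var authFailed *azidentity.AuthenticationFailedError
	if errors.As(err, &authFailed) {
		// The error text includes the AADSTS code, correlation ID, and timestamp.
		log.Printf("token request failed: %v", authFailed)
		return authFailed.RawResponse, true
	}
	return nil, false
}
```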
### Permission issues @@ -31,7 +32,7 @@ Service client errors with a status code of 401 or 403 often indicate that authe ## Find relevant information in errors -Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: +Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message: ``` ClientSecretCredential authentication failed @@ -57,9 +58,9 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. -- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. ### Enable and configure logging @@ -96,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error Code | Issue | Mitigation | |---|---|---| -|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | |---|---|---| -|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot UsernamePasswordCredential authentication issues @@ -172,7 +173,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio |"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. 
This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). -## Troubleshoot AzureCliCredential authentication issues +## Troubleshoot AzureCLICredential authentication issues | Error Message |Description| Mitigation | |---|---|---| @@ -195,6 +196,29 @@ az account get-access-token --output json --resource https://management.core.win > This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +## Troubleshoot AzureDeveloperCLICredential authentication issues + +| Error Message |Description| Mitigation | |---|---|---| +|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|| +|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|| + +#### Verify the Azure Developer CLI can obtain tokens + +You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI. + +```sh +azd config list +``` + +Once you've verified the Azure Developer CLI is using the correct account, you can validate that it's able to obtain tokens for this account. + +```sh +azd auth token --output json --scope https://management.core.windows.net/.default +``` +>Note that the output of this command will contain a valid access token, and SHOULD NOT BE SHARED to avoid compromising account security. + ## Troubleshoot `WorkloadIdentityCredential` authentication issues diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 47e77f88e3f7..1be55a4bdd3b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_6225ab0470" + "Tag": "go/azidentity_98074050dc" } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go new file mode 100644 index 000000000000..ada4d6501d2c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -0,0 +1,95 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +var supportedAuthRecordVersions = []string{"1.0"} + +// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication +// data.
Call these credentials' Authenticate method to get an authenticationRecord for a user. +type authenticationRecord struct { + // Authority is the URL of the authority that issued the token. + Authority string `json:"authority"` + + // ClientID is the ID of the application that authenticated the user. + ClientID string `json:"clientId"` + + // HomeAccountID uniquely identifies the account. + HomeAccountID string `json:"homeAccountId"` + + // TenantID identifies the tenant in which the user authenticated. + TenantID string `json:"tenantId"` + + // Username is the user's preferred username. + Username string `json:"username"` + + // Version of the AuthenticationRecord. + Version string `json:"version"` +} + +// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord +func (a *authenticationRecord) UnmarshalJSON(b []byte) error { + // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we + // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally + // different type enables this by assigning all the fields without recursing into this method. + type r authenticationRecord + err := json.Unmarshal(b, (*r)(a)) + if err != nil { + return err + } + if a.Version == "" { + return errors.New("AuthenticationRecord must have a version") + } + for _, v := range supportedAuthRecordVersions { + if a.Version == v { + return nil + } + } + return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions) +} + +// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. +func (a *authenticationRecord) account() public.Account { + return public.Account{ + Environment: a.Authority, + HomeAccountID: a.HomeAccountID, + PreferredUsername: a.Username, + } +} + +func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { + u, err := url.Parse(ar.IDToken.Issuer) + if err != nil { + return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + } + tenant := ar.IDToken.TenantID + if tenant == "" { + tenant = strings.Trim(u.Path, "/") + } + username := ar.IDToken.PreferredUsername + if username == "" { + username = ar.IDToken.UPN + } + return authenticationRecord{ + Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), + ClientID: ar.IDToken.Audience, + HomeAccountID: ar.Account.HomeAccountID, + TenantID: tenant, + Username: username, + Version: "1.0", + }, nil +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 10b742ce1a13..b0965036bbf0 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -15,12 +15,12 @@ import ( "net/http" "net/url" "os" - "regexp" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" 
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -41,6 +41,10 @@ const ( organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" + + traceNamespace = "Microsoft.Entra" + traceOpGetToken = "GetToken" + traceOpAuthenticate = "Authenticate" ) var ( @@ -49,6 +53,9 @@ var ( errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) +// tokenCachePersistenceOptions contains options for persistent token caching +type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions + // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user // 2. value of AZURE_AUTHORITY_HOST @@ -109,29 +116,23 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } -// validTenantID return true is it receives a valid tenantID, returns false otherwise +func alphanumeric(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') +} + func validTenantID(tenantID string) bool { - match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) - if err != nil { + if len(tenantID) < 1 { return false } - return match -} - -func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { - pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) - return pipelineAdapter{pl: pl} -} - -type pipelineAdapter struct { - pl runtime.Pipeline -} - -func (p pipelineAdapter) CloseIdleConnections() { - // do nothing + for _, r := range tenantID { + if !(alphanumeric(r) || r == '.' 
|| r == '-') { + return false + } + } + return true } -func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { +func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) { req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) if err != nil { return nil, err @@ -153,7 +154,18 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { return nil, err } } - resp, err := p.pl.Do(req) + + // copy headers to the new request, ignoring any for which the new request has a value + h := req.Raw().Header + for key, vals := range r.Header { + if _, has := h[key]; !has { + for _, val := range vals { + h.Add(key, val) + } + } + } + + resp, err := client.Pipeline().Do(req) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 55a0d654347e..43577ab3c5f3 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/exec" - "regexp" "runtime" "strings" "sync" @@ -25,13 +24,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) -const ( - credNameAzureCLI = "AzureCLICredential" - timeoutCLIRequest = 10 * time.Second -) +const credNameAzureCLI = "AzureCLICredential" -// used by tests to fake invoking the CLI -type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) +type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error) // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { @@ -39,17 +34,25 @@ type AzureCLICredentialOptions struct { // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the // logged in account can access. AdditionallyAllowedTenants []string + + // subscription is the name or ID of a subscription. Set this to acquire tokens for an account other + // than the Azure CLI's current account. + subscription string + // TenantID identifies the tenant the credential should authenticate in. // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. TenantID string - tokenProvider azureCLITokenProvider + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking az + tokenProvider azTokenProvider } // init returns an instance of AzureCLICredentialOptions initialized with default values. 
func (o *AzureCLICredentialOptions) init() { if o.tokenProvider == nil { - o.tokenProvider = defaultTokenProvider + o.tokenProvider = defaultAzTokenProvider } } @@ -65,6 +68,14 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if options != nil { cp = *options } + for _, r := range cp.subscription { + if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription) + } + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } cp.init() cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil @@ -73,50 +84,51 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent // GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. // This method is called automatically by Azure SDK clients. func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} if len(opts.Scopes) != 1 { - return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + if !validScope(opts.Scopes[0]) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0]) } tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants) if err != nil { - return azcore.AccessToken{}, err + return at, err } - // pass the CLI an AAD v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes - opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes[0], tenant) - if err != nil { - return azcore.AccessToken{}, err + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription) + if err == nil { + at, err = c.createAccessToken(b) } - at, err := c.createAccessToken(b) if err != nil { - return azcore.AccessToken{}, err + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err } msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) log.Write(EventAuthentication, msg) return at, nil } -var defaultTokenProvider azureCLITokenProvider = func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) - if err != nil { - return nil, err - } - if !match { - return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) - } - +// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. 
+var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) { + // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes + resource := strings.TrimSuffix(scopes[0], defaultSuffix) // set a default timeout for this authentication iff the application hasn't done so already var cancel context.CancelFunc if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + ctx, cancel = context.WithTimeout(ctx, cliTimeout) defer cancel() } - commandLine := "az account get-access-token -o json --resource " + resource if tenantID != "" { commandLine += " --tenant " + tenantID } + if subscription != "" { + // subscription needs quotes because it may contain spaces + commandLine += ` --subscription "` + subscription + `"` + } var cliCmd *exec.Cmd if runtime.GOOS == "windows" { dir := os.Getenv("SYSTEMROOT") diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go new file mode 100644 index 000000000000..cbe7c4c2db1f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" + +type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error) + +// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. +type AzureDeveloperCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + + // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, + // which is the tenant of the selected Azure subscription. + TenantID string + + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking azd + tokenProvider azdTokenProvider +} + +// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI]. +// +// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview +type AzureDeveloperCLICredential struct { + mu *sync.Mutex + opts AzureDeveloperCLICredentialOptions +} + +// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options. 
+func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) { + cp := AzureDeveloperCLICredentialOptions{} + if options != nil { + cp = *options + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } + if cp.tokenProvider == nil { + cp.tokenProvider = defaultAzdTokenProvider + } + return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil +} + +// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd. +// This method is called automatically by Azure SDK clients. +func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} + if len(opts.Scopes) == 0 { + return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope") + } + for _, scope := range opts.Scopes { + if !validScope(scope) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope) + } + } + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants) + if err != nil { + return at, err + } + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant) + if err == nil { + at, err = c.createAccessToken(b) + } + if err != nil { + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err + } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) + return at, nil +} + +// defaultAzdTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI.
+var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) { + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + commandLine := "azd auth token -o json" + if tenant != "" { + commandLine += " --tenant-id " + tenant + } + for _, scope := range scopes { + commandLine += " --scope " + scope + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") { + msg = "Azure Developer CLI not found on path" + } else if strings.Contains(msg, "azd auth login") { + msg = `please run "azd auth login" from a command prompt to authenticate before using this credential` + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) + } + return output, nil +} + +func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"token"` + ExpiresOn string `json:"expiresOn"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + return azcore.AccessToken{ + ExpiresOn: exp.UTC(), + Token: t.AccessToken, + }, nil +} + +var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 9002ea0b0505..d077682c5c2b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -25,6 +25,7 @@ stages: - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: RunLiveTests: true + UsePipelineProxy: false ServiceDirectory: 'azidentity' CloudConfig: Public: diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index 303d5fc0925c..fc3df68eb196 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -20,9 +21,9 @@ const credNameAssertion = "ClientAssertionCredential" // ClientAssertionCredential authenticates an application with assertions provided by a callback function. // This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for // the most common assertion scenario, authenticating a service principal with a certificate. See -// [Azure AD documentation] for details of the assertion format. +// [Microsoft Entra ID documentation] for details of the assertion format. // -// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format type ClientAssertionCredential struct { client *confidentialClient } @@ -35,11 +36,15 @@ type ClientAssertionCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. @@ -56,9 +61,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { @@ -67,9 +73,13 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c return &ClientAssertionCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index d3300e3053bd..607533f486e1 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "golang.org/x/crypto/pkcs12" ) @@ -29,15 +30,20 @@ type ClientCertificateCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. 
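For context on the constructor changed in the next hunk, here is a minimal sketch (not from this change) of building the credential with `ParseCertificates`, which the end of this file's diff describes; the certificate path, tenant ID, and client ID are placeholders.

```go
package example

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func newCertCredential() (*azidentity.ClientCertificateCredential, error) {
	// Placeholder path; the file may be PEM or PKCS12 and may include the private key.
	data, err := os.ReadFile("/path/to/certificate.pem")
	if err != nil {
		return nil, err
	}
	certs, key, err := azidentity.ParseCertificates(data, nil) // nil = no password
	if err != nil {
		return nil, err
	}
	cred, err := azidentity.NewClientCertificateCredential("tenant-id", "client-id", certs, key, nil)
	if err != nil {
		log.Printf("couldn't create ClientCertificateCredential: %v", err)
		return nil, err
	}
	return cred, nil
}
```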
@@ -58,10 +64,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { @@ -70,9 +77,13 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return &ClientCertificateCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } // ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index d2ff7582b997..9e6772e9b80a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -24,11 +25,15 @@ type ClientSecretCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientSecretCredential authenticates an application with a client secret. 
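Similarly, a minimal sketch (not from this change) of constructing the secret credential with the `AdditionallyAllowedTenants` option shown above; all ID and secret values are placeholders.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func newSecretCredential() (*azidentity.ClientSecretCredential, error) {
	opts := &azidentity.ClientSecretCredentialOptions{
		// Allow token acquisition for one additional tenant besides the credential's own.
		AdditionallyAllowedTenants: []string{"another-tenant-id"}, // placeholder
	}
	return azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", opts)
}
```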
@@ -46,20 +51,25 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { return nil, err } - return &ClientSecretCredential{c}, nil + return &ClientSecretCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 4853a9a0095d..854267bdbfd8 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "net/http" "os" "strings" "sync" @@ -17,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -28,6 +30,7 @@ type confidentialClientOptions struct { // Assertion for on-behalf-of authentication Assertion string DisableInstanceDiscovery, SendX5C bool + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -40,6 +43,7 @@ type confidentialClient struct { name string opts confidentialClientOptions region string + azClient *azcore.Client } func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) { @@ -50,6 +54,14 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr if err != nil { return nil, err } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &opts.ClientOptions) + if err != nil { + return nil, err + } opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants) return &confidentialClient{ caeMu: &sync.Mutex{}, @@ -62,6 +74,7 @@ func 
newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr opts: opts, region: os.Getenv(azureRegionalAuthorityName), tenantID: tenantID, + azClient: client, }, nil } @@ -132,10 +145,15 @@ func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequest } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { + cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } authority := runtime.JoinPaths(c.host, c.tenantID) o := []confidential.Option{ confidential.WithAzureRegion(c.region), - confidential.WithHTTPClient(newPipelineAdapter(&c.opts.ClientOptions)), + confidential.WithCache(cache), + confidential.WithHTTPClient(c), } if enableCAE { o = append(o, confidential.WithClientCapabilities(cp1)) @@ -149,8 +167,18 @@ func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClie return confidential.New(authority, c.clientID, c.cred, o...) } -// resolveTenant returns the correct tenant for a token request given the client's +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's // configuration, or an error when that configuration doesn't allow the specified tenant func (c *confidentialClient) resolveTenant(specified string) (string, error) { return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants) } + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (c *confidentialClient) CloseIdleConnections() { + // do nothing +} + +func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(c.azClient, r) +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 7647c60b1cb7..35aeef867478 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -30,7 +30,7 @@ type DefaultAzureCredentialOptions struct { // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -49,6 +49,7 @@ type DefaultAzureCredentialOptions struct { // more control over its configuration. // - [ManagedIdentityCredential] // - [AzureCLICredential] +// - [AzureDeveloperCLICredential] // // Consult the documentation for these credential types for more information on how they authenticate. 
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for @@ -117,6 +118,17 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) } + azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + TenantID: options.TenantID, + }) + if err == nil { + creds = append(creds, azdCred) + } else { + errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err}) + } + if len(errorMessages) > 0 { log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t")) } @@ -129,7 +141,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default return &DefaultAzureCredential{chain: chain}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.chain.GetToken(ctx, opts) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go new file mode 100644 index 000000000000..d8b952f532ee --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "errors" + "time" +) + +// cliTimeout is the default timeout for authentication attempts via CLI tools +const cliTimeout = 10 * time.Second + +// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a +// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try +// the next credential in its chain (another developer credential). +func unavailableIfInChain(err error, inDefaultChain bool) error { + if err != nil && inDefaultChain { + var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) + } + } + return err +} + +// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials. +func validScope(scope string) bool { + for _, r := range scope { + if !(alphanumeric(r) || r == '.' 
|| r == '-' || r == '_' || r == '/' || r == ':') { + return false + } + } + return true +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index d245c269a760..1b7a283703a0 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameDeviceCode = "DeviceCodeCredential" @@ -23,19 +24,34 @@ type DeviceCodeCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string + + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user + // interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant // applications. TenantID string + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions + // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -63,14 +79,14 @@ type DeviceCodeMessage struct { UserCode string `json:"user_code"` // VerificationURL is the URL at which the user must authenticate. VerificationURL string `json:"verification_uri"` - // Message is user instruction from Azure Active Directory. + // Message is user instruction from Microsoft Entra ID. 
Message string `json:"message"` } // DeviceCodeCredential acquires tokens for a user via the device code flow, which has the -// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful // for authenticating a user in an environment without a web browser, such as an SSH session. -// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it // automatically opens a browser to the login page. type DeviceCodeCredential struct { client *publicClient @@ -84,10 +100,13 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC } cp.init() msalOpts := publicClientOptions{ - AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, - ClientOptions: cp.ClientOptions, - DeviceCodePrompt: cp.UserPrompt, - DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DeviceCodePrompt: cp.UserPrompt, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + Record: cp.authenticationRecord, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -97,10 +116,23 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication. // This method is called automatically by Azure SDK clients. 
func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 7ecd928e0245..42f84875e23a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -25,7 +25,7 @@ type EnvironmentCredentialOptions struct { azcore.ClientOptions // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -156,7 +156,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.cred.GetToken(ctx, opts) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index e1a21e0030a9..335d2b7dcf24 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -18,6 +18,10 @@ import ( msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" ) +// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token +// because user interaction is required and the credential is configured not to automatically prompt the user. +var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. 
Call Authenticate to interactively authenticate a user"} + // getResponseFromError retrieves the response carried by // an AuthenticationFailedError or MSAL CallErr, if any func getResponseFromError(err error) *http.Response { @@ -53,7 +57,13 @@ func (e *AuthenticationFailedError) Error() string { } msg := &bytes.Buffer{} fmt.Fprintf(msg, e.credType+" authentication failed\n") - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + // this happens when the response is created from a custom HTTP transporter, + // which doesn't guarantee to bind the original request to the response + fmt.Fprintln(msg, "Request information not available") + } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") @@ -74,6 +84,8 @@ func (e *AuthenticationFailedError) Error() string { switch e.credType { case credNameAzureCLI: anchor = "azure-cli" + case credNameAzureDeveloperCLI: + anchor = "azd" case credNameCert: anchor = "client-cert" case credNameSecret: diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work new file mode 100644 index 000000000000..04ea962b422d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work @@ -0,0 +1,6 @@ +go 1.18 + +use ( + . 
+ ./cache +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum new file mode 100644 index 000000000000..65bcba7dfea4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -0,0 +1,41 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term 
v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 08f3efbf3ec4..bd829698375c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameBrowser = "InteractiveBrowserCredential" @@ -22,26 +23,40 @@ type InteractiveBrowserCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when + // user interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. LoginHint string - // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + + // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required // only when setting ClientID, and must match a redirect URI in the application's registration. // Applications which have registered "http://localhost" as a redirect URI need not set this option. 
RedirectURL string - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -66,10 +81,14 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption } cp.init() msalOpts := publicClientOptions{ - ClientOptions: cp.ClientOptions, - DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - LoginHint: cp.LoginHint, - RedirectURL: cp.RedirectURL, + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + LoginHint: cp.LoginHint, + Record: cp.authenticationRecord, + RedirectURL: cp.RedirectURL, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -78,9 +97,22 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go new file mode 100644 index 000000000000..b1b4d5c8bd35 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +// TokenCachePersistenceOptions contains options for persistent token caching +type TokenCachePersistenceOptions struct { + // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text + // when encryption isn't possible. Setting this true doesn't disable encryption. 
The cache always attempts + // encryption before falling back to plaintext storage. + AllowUnencryptedStorage bool + + // Name identifies the cache. Set this to isolate data from other applications. + Name string +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go new file mode 100644 index 000000000000..c1498b464471 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "errors" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") + +// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to +// use a persistent cache must first import the cache module, which will replace this function +// with a platform-specific implementation. +var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { + if o == nil { + return nil, nil + } + return nil, errMissingImport +} + +// CacheFilePath returns the path to the cache file for the given name. +// Defining it in this package makes it available to azidentity tests. +var CacheFilePath = func(name string) (string, error) { + return "", errMissingImport +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index fdc3c1f67760..d129a1e91c2a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -28,18 +28,20 @@ import ( const ( arcIMDSEndpoint = "IMDS_ENDPOINT" + defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID" identityEndpoint = "IDENTITY_ENDPOINT" identityHeader = "IDENTITY_HEADER" identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" headerMetadata = "Metadata" imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + miResID = "mi_res_id" msiEndpoint = "MSI_ENDPOINT" + msiResID = "msi_res_id" + msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" azureArcAPIVersion = "2019-08-15" + qpClientID = "client_id" serviceFabricAPIVersion = "2019-07-01-preview" - - qpClientID = "client_id" - qpResID = "mi_res_id" ) type msiType int @@ -47,6 +49,7 @@ type msiType int const ( msiTypeAppService msiType = iota msiTypeAzureArc + msiTypeAzureML msiTypeCloudShell msiTypeIMDS msiTypeServiceFabric @@ -55,7 +58,7 @@ const ( // managedIdentityClient provides the base for authenticating in managed identity environments // This type includes an runtime.Pipeline and TokenCredentialOptions. 
type managedIdentityClient struct { - pipeline runtime.Pipeline + azClient *azcore.Client msiType msiType endpoint string id ManagedIDKind @@ -135,13 +138,27 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAzureArc } } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { - env = "Cloud Shell" c.endpoint = endpoint - c.msiType = msiTypeCloudShell + if _, ok := os.LookupEnv(msiSecret); ok { + env = "Azure ML" + c.msiType = msiTypeAzureML + } else { + env = "Cloud Shell" + c.msiType = msiTypeCloudShell + } } else { setIMDSRetryOptionDefaults(&cp.Retry) } - c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &cp) + if err != nil { + return nil, err + } + c.azClient = client if log.Should(EventAuthentication) { log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) @@ -168,7 +185,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, err } - resp, err := c.pipeline.Do(msg) + resp, err := c.azClient.Pipeline().Do(msg) if err != nil { return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } @@ -247,6 +264,8 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) } return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeAzureML: + return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: return c.createServiceFabricAuthRequest(ctx, id, scopes) case msiTypeCloudShell: @@ -267,7 +286,7 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma q.Add("resource", strings.Join(scopes, " ")) if id != nil { if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(msiResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -287,7 +306,7 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, q.Add("resource", scopes[0]) if id != nil { if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -296,6 +315,29 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, return request, nil } +func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("secret", os.Getenv(msiSecret)) + q := request.Raw().URL.Query() + q.Add("api-version", "2017-09-01") + q.Add("resource", strings.Join(scopes, " ")) + q.Add("clientid", os.Getenv(defaultIdentityClientID)) + if id != nil { + if id.idKind() == miResourceID { + log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") + q.Set("clientid", "") + q.Set(miResID, id.String()) + } else { + q.Set("clientid", id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { request, err := runtime.NewRequest(ctx, http.MethodGet, 
c.endpoint) if err != nil { @@ -309,7 +351,7 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte if id != nil { log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -330,7 +372,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour q.Add("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key - response, err := c.pipeline.Do(request) + response, err := c.azClient.Pipeline().Do(request) if err != nil { return "", err } @@ -369,7 +411,7 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i if id != nil { log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -395,7 +437,7 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") q := request.Raw().URL.Query() if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 35c5e6725cda..dcd278befa16 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -67,8 +68,8 @@ type ManagedIdentityCredentialOptions struct { // ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: -// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// user-assigned identity. 
See Microsoft Entra ID documentation for more information about managed identities: +// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient @@ -92,7 +93,9 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M clientID = options.ID.String() } // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value - c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{}) + c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ + ClientOptions: options.ClientOptions, + }) if err != nil { return nil, err } @@ -101,13 +104,18 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + if len(opts.Scopes) != 1 { - err := fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) + err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } - // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.client.GetToken(ctx, opts) + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 2b360b681df1..5e67cf02145d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -21,9 +22,9 @@ const credNameOBO = "OnBehalfOfCredential" // OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by // middle-tier services that authorize requests to other services with a delegated user identity. Because this // is not an interactive authentication flow, an application using it must have admin consent for any delegated -// permissions before requesting tokens for them. See [Azure Active Directory documentation] for more details. +// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. 
// -// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { client *confidentialClient } @@ -36,11 +37,13 @@ type OnBehalfOfCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. // This setting controls whether the credential sends the public certificate chain in the x5c header of each // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. @@ -84,9 +87,13 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf return &OnBehalfOfCredential{c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return o.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := o.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 6512d3e25fd8..63c31190d188 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -8,38 +8,52 @@ package azidentity import ( "context" + "errors" "fmt" + "net/http" "strings" "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + + // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate() + _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" ) type publicClientOptions struct { azcore.ClientOptions - AdditionallyAllowedTenants []string - DeviceCodePrompt func(context.Context, DeviceCodeMessage) error - DisableInstanceDiscovery bool - LoginHint, RedirectURL string - Username, Password string + AdditionallyAllowedTenants []string + DeviceCodePrompt func(context.Context, DeviceCodeMessage) error + DisableAutomaticAuthentication bool + DisableInstanceDiscovery bool + LoginHint, RedirectURL string + Record authenticationRecord + TokenCachePersistenceOptions *tokenCachePersistenceOptions + Username, Password string } // publicClient wraps the MSAL public client type publicClient struct { - account public.Account cae, noCAE msalPublicClient caeMu, noCAEMu, clientMu *sync.Mutex clientID, tenantID string + defaultScope []string host string name string opts publicClientOptions + record authenticationRecord + azClient *azcore.Client } +var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions") + func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) { if !validTenantID(tenantID) { return nil, errInvalidTenantID @@ -48,19 +62,76 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p if err != nil { return nil, err } + // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate() + audience := o.Cloud.Services[cloud.ResourceManager].Audience + if audience == "" { + // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash) + if !strings.HasSuffix(host, "/") { + host += "/" + } + switch host { + case cloud.AzureChina.ActiveDirectoryAuthorityHost: + audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience + case 
cloud.AzureGovernment.ActiveDirectoryAuthorityHost: + audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience + case cloud.AzurePublic.ActiveDirectoryAuthorityHost: + audience = cloud.AzurePublic.Services[cloud.ResourceManager].Audience + } + } + // if we didn't come up with an audience, the application will have to specify a scope for Authenticate() + var defaultScope []string + if audience != "" { + defaultScope = []string{audience + defaultSuffix} + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &o.ClientOptions) + if err != nil { + return nil, err + } o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants) return &publicClient{ - caeMu: &sync.Mutex{}, - clientID: clientID, - clientMu: &sync.Mutex{}, - host: host, - name: name, - noCAEMu: &sync.Mutex{}, - opts: o, - tenantID: tenantID, + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + defaultScope: defaultScope, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: o, + record: o.Record, + tenantID: tenantID, + azClient: client, }, nil } +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { + if tro == nil { + tro = &policy.TokenRequestOptions{} + } + if len(tro.Scopes) == 0 { + if p.defaultScope == nil { + return authenticationRecord{}, errScopeRequired + } + tro.Scopes = p.defaultScope + } + client, mu, err := p.client(*tro) + if err != nil { + return authenticationRecord{}, err + } + mu.Lock() + defer mu.Unlock() + _, err = p.reqToken(ctx, client, *tro) + if err == nil { + scope := strings.Join(tro.Scopes, ", ") + msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope) + log.Write(EventAuthentication, msg) + } + return p.record, err +} + // GetToken requests an access token from MSAL, checking the cache first. 
func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { if len(tro.Scopes) < 1 { @@ -76,10 +147,13 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti } mu.Lock() defer mu.Unlock() - ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.account), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) if err == nil { return p.token(ar, err) } + if p.opts.DisableAutomaticAuthentication { + return azcore.AccessToken{}, errAuthenticationRequired + } at, err := p.reqToken(ctx, client, tro) if err == nil { msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) @@ -148,9 +222,14 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { + cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithHTTPClient(newPipelineAdapter(&p.opts.ClientOptions)), + public.WithCache(cache), + public.WithHTTPClient(p), } if enableCAE { o = append(o, public.WithClientCapabilities(cp1)) @@ -163,7 +242,7 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { - p.account = ar.Account + p.record, err = newAuthenticationRecord(ar) } else { res := getResponseFromError(err) err = newAuthenticationFailedError(p.name, err.Error(), res, err) @@ -171,8 +250,24 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } -// resolveTenant returns the correct tenant for a token request given the client's +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's // configuration, or an error when that configuration doesn't allow the specified tenant func (p *publicClient) resolveTenant(specified string) (string, error) { - return resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + if t == p.tenantID { + // callers pass this value to MSAL's WithTenantID(). 
There's no need to redundantly specify + // the client's default tenant and doing so is an error when that tenant is "organizations" + t = "" + } + return t, err +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (p *publicClient) CloseIdleConnections() { + // do nothing +} + +func (p *publicClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(p.azClient, r) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index f787ec0ce18f..294ed81e951c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameUserPassword = "UsernamePasswordCredential" @@ -23,11 +24,19 @@ type UsernamePasswordCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, @@ -45,11 +54,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.authenticationRecord, + TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -58,9 +69,22 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st return &UsernamePasswordCredential{client: c}, err } -// GetToken requests an access token from Azure Active Directory. 
This method is called automatically by Azure SDK clients. +// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 65e74e31e3b1..9b9d7ae0d206 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -10,6 +10,9 @@ const ( // UserAgent is the string to be used in the user agent string when making requests. component = "azidentity" + // module is the fully qualified name of the module used in telemetry and distributed tracing. + module = "github.com/Azure/azure-sdk-for-go/sdk/" + component + // Version is the semantic version (see http://semver.org) of this module. - version = "v1.4.0" + version = "v1.5.2" ) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 7e016324d229..3e43e788e931 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameWorkloadIdentity = "WorkloadIdentityCredential" @@ -41,7 +42,7 @@ type WorkloadIdentityCredentialOptions struct { // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. 
DisableInstanceDiscovery bool @@ -93,9 +94,13 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( return &w, nil } -// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return w.cred.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := w.cred.GetToken(ctx, opts) + return tk, err } // getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md new file mode 100644 index 000000000000..8a5c4cc9f12a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md @@ -0,0 +1,110 @@ +# Release History + +## 0.12.0 (2023-04-13) + +### Features Added +* upgraded to api version 7.4 + +### Breaking Changes +* This module is now DEPRECATED. The latest supported version of this module is at github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets + +## 0.11.0 (2022-11-08) + +### Breaking Changes +* `NewClient` returns an `error` + +## 0.10.1 (2022-09-20) + +### Features Added +* Added `ClientOptions.DisableChallengeResourceVerification`. + See https://aka.ms/azsdk/blog/vault-uri for more information. + +## 0.10.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. + +## 0.9.0 (2022-08-09) + +### Breaking Changes +* Changed type of `NewClient` options parameter to `azsecrets.ClientOptions`, which embeds + the former type, `azcore.ClientOptions` + +## 0.8.0 (2022-07-07) + +### Breaking Changes +* The `Client` API now corresponds more directly to the Key Vault REST API. + Most method signatures and types have changed. See the + [module documentation](https://aka.ms/azsdk/go/keyvault-secrets/docs) + for updated code examples and more details. + +### Other Changes +* Upgrade to latest `azcore` + +## 0.7.1 (2022-05-12) + +### Other Changes +* Updated to latest `azcore` and `internal` modules. 
+ +## 0.7.0 (2022-04-06) + +### Features Added +* Added `PossibleDeletionRecoveryLevelValues` to iterate over all valid `DeletionRecoveryLevel` values +* Implemented generic pagers from `runtime.Pager` for all List operations +* Added `Name *string` to `DeletedSecret`, `Properties`, `Secret`, `SecretItem`, and `SecretItem` +* Added `Client.VaultURL` to determine the vault URL for debugging +* Adding `ResumeToken` method to pollers for resuming polling at a later date by using the added `ResumeToken` optional parameter on client polling methods + +### Breaking Changes +* Requires a minimum version of go 1.18 +* Removed `RawResponse` from pollers +* Removed `DeletionRecoveryLevel` +* Polling operations return a Poller struct directly instead of a Response envelope +* Removed `ToPtr` methods +* `Client.UpdateSecretProperties` takes a `Secret` +* Renamed `Client.ListSecrets` to `Client.ListPropertiesOfSecrets` +* Renamed `Client.ListSecretVersions` to `Client.ListPropertiesOfSecretVersions` +* Renamed `DeletedDate` to `DeletedOn` and `Managed` to `IsManaged` +* Moved `ContentType`, `Tags`, `KeyID`, and `IsManaged` to `Properties` + +## 0.6.0 (2022-03-08) + +### Breaking Changes +* Changes `Attributes` to `Properties` +* Changes `Secret.KID` to `Secret.KeyID` +* Changes `DeletedSecretBundle` to `DeletedSecret` +* Changes `DeletedDate` to `DeletedOn`, `Created` to `CreatedOn`, and `Updated` to `UpdatedOn` +* Changes the signature of `Client.UpdateSecretProperties` to have all alterable properties in the `UpdateSecretPropertiesOptions` parameter, removing the `parameters Properties` parameter. +* Changes `Item` to `SecretItem` +* Pollers and pagers are structs instead of interfaces +* Prefixed all `DeletionRecoveryLevel` constants with "DeletionRecoveryLevel" +* Changed pager APIs for `ListSecretVersionsPager`, `ListDeletedSecretsPager`, and `ListSecretsPager` + * Use the `More()` method to determine if there are more pages to fetch + * Use the `NextPage(context.Context)` to fetch the next page of results +* Removed all `RawResponse *http.Response` fields from response structs. + +## 0.5.0 (2022-02-08) + +### Breaking Changes +* Fixes a bug where `UpdateSecretProperties` will delete properties that are not explicitly set each time. This is only a breaking change at runtime, where the request body will change. + +## 0.4.0 (2022-01-11) + +### Other Changes +* Bumps `azcore` dependency from `v0.20.0` to `v0.21.0` + +## 0.3.0 (2021-11-09) + +### Features Added +* Clients can now connect to Key Vaults in any cloud + +## 0.2.0 (2021-11-02) + +### Other Changes +* Bumps `azcore` dependency to `v0.20.0` and `azidentity` to `v0.12.0` + +## 0.1.1 (2021-10-06) +* Adds the MIT License for redistribution + +## 0.1.0 (2021-10-05) +* This is the initial release of the `azsecrets` library diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md new file mode 100644 index 000000000000..06a23c841c05 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md @@ -0,0 +1,148 @@ +# Azure Key Vault Secrets client module for Go +> Deprecated: use github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets instead + +Azure Key Vault helps solve the following problems: +* Secrets management (this module) - securely store and control access to tokens, passwords, certificates, API keys, and other secrets +* Cryptographic key management ([azkeys](https://aka.ms/azsdk/go/keyvault-keys/docs)) - create, store, and control access to the keys used to encrypt your data +* Certificate management ([azcertificates](https://aka.ms/azsdk/go/keyvault-certificates/docs)) - create, manage, and deploy public and private SSL/TLS certificates + +[Source code][module_source] | [Package (pkg.go.dev)][reference_docs] | [Product documentation][keyvault_docs] | [Samples][secrets_samples] + +## Getting started + +### Install packages + +Install `azsecrets` and `azidentity` with `go get`: +``` +go get github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` +[azidentity][azure_identity] is used for Azure Active Directory authentication as demonstrated below. + + +### Prerequisites + +* An [Azure subscription][azure_sub] +* A supported Go version (the Azure SDK supports the two most recent Go releases) +* A key vault. If you need to create one, see the Key Vault documentation for instructions on doing so in the [Azure Portal][azure_keyvault_portal] or with the [Azure CLI][azure_keyvault_cli]. + +### Authentication + +This document demonstrates using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate. This credential type works in both local development and production environments. We recommend using a [managed identity][managed_identity] in production. + +[Client][client_docs] accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types. 
+
+#### Create a client
+
+Constructing the client also requires your vault's URL, which you can get from the Azure CLI or the Azure Portal.
+
+```golang
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
+)
+
+func main() {
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		// TODO: handle error
+	}
+
+	client, err := azsecrets.NewClient("https://<vault-name>.vault.azure.net", cred, nil)
+	if err != nil {
+		// TODO: handle error
+	}
+	_ = client // TODO: use the client
+}
+```
+
+## Key concepts
+
+### Secret
+
+A secret consists of a secret value and its associated metadata and management information. This library handles secret values as strings, but Azure Key Vault doesn't store them as such. For more information about secrets and how Key Vault stores and manages them, see the [Key Vault documentation](https://docs.microsoft.com/azure/key-vault/general/about-keys-secrets-certificates).
+
+`azsecrets.Client` can set secret values in the vault, update secret metadata, and delete secrets, as shown in the examples below.
+
+## Examples
+
+Get started with our [examples](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets#pkg-examples).
+
+## Troubleshooting
+
+### Error Handling
+
+All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Key Vault.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+
+resp, err := client.GetSecret(context.Background(), "secretName", "", nil)
+if err != nil {
+	var httpErr *azcore.ResponseError
+	if errors.As(err, &httpErr) {
+		// TODO: investigate httpErr
+	} else {
+		// TODO: not an HTTP error
+	}
+}
+```
+
+### Logging
+
+This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout:
+
+```go
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+
+// Print log events to stdout
+azlog.SetListener(func(cls azlog.Event, msg string) {
+	fmt.Println(msg)
+})
+
+// Includes only requests and responses in credential logs
+azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
+```
+
+### Accessing `http.Response`
+
+You can access the raw `*http.Response` returned by Key Vault using the `runtime.WithCaptureResponse` method and a context passed to any client method.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+var response *http.Response
+ctx := runtime.WithCaptureResponse(context.TODO(), &response)
+_, err = client.GetSecret(ctx, "secretName", "", nil)
+if err != nil {
+	// TODO: handle error
+}
+// TODO: do something with response
+```
+
+### Additional Documentation
+
+See the [API reference documentation][reference_docs] for complete documentation of this module.
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot.
You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. + +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_keyvault_cli]: https://docs.microsoft.com/azure/key-vault/general/quick-create-cli +[azure_keyvault_portal]: https://docs.microsoft.com/azure/key-vault/general/quick-create-portal +[azure_sub]: https://azure.microsoft.com/free/ +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[default_cred_ref]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity#defaultazurecredential +[keyvault_docs]: https://docs.microsoft.com/azure/key-vault/ +[managed_identity]: https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +[reference_docs]: https://aka.ms/azsdk/go/keyvault-secrets/docs +[client_docs]: https://aka.ms/azsdk/go/keyvault-secrets/docs#Client +[module_source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/keyvault/azsecrets +[secrets_samples]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/keyvault/azsecrets/example_test.go + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fkeyvault%2Fazsecrets%2FREADME.png) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md new file mode 100644 index 000000000000..72269c7a0e22 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md @@ -0,0 +1,4 @@ +# Troubleshoot Azure Key Vault Secrets Client Module Issues + +See our [Azure Key Vault SDK Troubleshooting Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/keyvault/TROUBLESHOOTING.md) +to troubleshoot issues common to Azure Key Vault client modules. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md new file mode 100644 index 000000000000..e6cfb9f8c65b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md @@ -0,0 +1,96 @@ +## Go + +These settings apply only when `--go` is specified on the command line. 
+ +```yaml +clear-output-folder: false +export-clients: true +go: true +input-file: https://github.com/Azure/azure-rest-api-specs/blob/551275acb80e1f8b39036b79dfc35a8f63b601a7/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.4/secrets.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets +openapi-type: "data-plane" +output-folder: ../azsecrets +override-client-name: Client +security: "AADToken" +security-scopes: "https://vault.azure.net/.default" +use: "@autorest/go@4.0.0-preview.46" +version: "^3.0.0" + +directive: + # delete unused model + - remove-model: SecretProperties + + # make vault URL a parameter of the client constructor + - from: swagger-document + where: $["x-ms-parameterized-host"] + transform: $.parameters[0]["x-ms-parameter-location"] = "client" + + # rename parameter models to match their methods + - rename-model: + from: SecretRestoreParameters + to: RestoreSecretParameters + - rename-model: + from: SecretSetParameters + to: SetSecretParameters + - rename-model: + from: SecretUpdateParameters + to: UpdateSecretParameters + + # rename paged operations from Get* to List* + - rename-operation: + from: GetDeletedSecrets + to: ListDeletedSecrets + - rename-operation: + from: GetSecrets + to: ListSecrets + - rename-operation: + from: GetSecretVersions + to: ListSecretVersions + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type (?:Error|KeyVaultError).+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:Error|KeyVaultError)\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the Attributes model defined in common.json (it's used only with allOf) + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type Attributes.+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(a \*?Attributes\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the version path param check (version == "" is legal for Key Vault but indescribable by OpenAPI) + - from: client.go + where: $ + transform: return $.replace(/\sif secretVersion == "" \{\s+.+secretVersion cannot be empty"\)\s+\}\s/g, ""); + + # delete client name prefix from method options and response types + - from: + - client.go + - models.go + - response_types.go + where: $ + transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2"); + + # make secret IDs a convenience type so we can add parsing methods + - from: models.go + where: $ + transform: return $.replace(/(\sID \*)string(\s+.*)/g, "$1ID$2") + + # Maxresults -> MaxResults + - from: + - client.go + - models.go + where: $ + transform: return $.replace(/Maxresults/g, "MaxResults") + + # secretName, secretVersion -> name, version + - from: client.go + - where: $ + - transform: return $.replace(/secretName/g, "name").replace(/secretVersion/g, "version") +``` diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go new file mode 100644 index 000000000000..d6e487f2c39a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azsecrets diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml new file mode 100644 index 000000000000..3e196e0c80e4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml @@ -0,0 +1,29 @@ + +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/azsecrets + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/azsecrets + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'keyvault/azsecrets' + RunLiveTests: true diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go new file mode 100644 index 000000000000..fdfbabf96e9f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go @@ -0,0 +1,650 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Client contains the methods for the Client group. +// Don't use this type directly, use a constructor function instead. +type Client struct { + internal *azcore.Client + endpoint string +} + +// BackupSecret - Requests that a backup of the specified secret be downloaded to the client. All versions of the secret will +// be downloaded. This operation requires the secrets/backup permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - BackupSecretOptions contains the optional parameters for the Client.BackupSecret method. +func (client *Client) BackupSecret(ctx context.Context, name string, options *BackupSecretOptions) (BackupSecretResponse, error) { + req, err := client.backupSecretCreateRequest(ctx, name, options) + if err != nil { + return BackupSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BackupSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BackupSecretResponse{}, runtime.NewResponseError(resp) + } + return client.backupSecretHandleResponse(resp) +} + +// backupSecretCreateRequest creates the BackupSecret request. 
+func (client *Client) backupSecretCreateRequest(ctx context.Context, name string, options *BackupSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/backup" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// backupSecretHandleResponse handles the BackupSecret response. +func (client *Client) backupSecretHandleResponse(resp *http.Response) (BackupSecretResponse, error) { + result := BackupSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BackupSecretResult); err != nil { + return BackupSecretResponse{}, err + } + return result, nil +} + +// DeleteSecret - The DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an individual +// version of a secret. This operation requires the secrets/delete permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - DeleteSecretOptions contains the optional parameters for the Client.DeleteSecret method. +func (client *Client) DeleteSecret(ctx context.Context, name string, options *DeleteSecretOptions) (DeleteSecretResponse, error) { + req, err := client.deleteSecretCreateRequest(ctx, name, options) + if err != nil { + return DeleteSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DeleteSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DeleteSecretResponse{}, runtime.NewResponseError(resp) + } + return client.deleteSecretHandleResponse(resp) +} + +// deleteSecretCreateRequest creates the DeleteSecret request. +func (client *Client) deleteSecretCreateRequest(ctx context.Context, name string, options *DeleteSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// deleteSecretHandleResponse handles the DeleteSecret response. +func (client *Client) deleteSecretHandleResponse(resp *http.Response) (DeleteSecretResponse, error) { + result := DeleteSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretBundle); err != nil { + return DeleteSecretResponse{}, err + } + return result, nil +} + +// GetDeletedSecret - The Get Deleted Secret operation returns the specified deleted secret along with its attributes. This +// operation requires the secrets/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. 
+// - options - GetDeletedSecretOptions contains the optional parameters for the Client.GetDeletedSecret method. +func (client *Client) GetDeletedSecret(ctx context.Context, name string, options *GetDeletedSecretOptions) (GetDeletedSecretResponse, error) { + req, err := client.getDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return GetDeletedSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetDeletedSecretResponse{}, runtime.NewResponseError(resp) + } + return client.getDeletedSecretHandleResponse(resp) +} + +// getDeletedSecretCreateRequest creates the GetDeletedSecret request. +func (client *Client) getDeletedSecretCreateRequest(ctx context.Context, name string, options *GetDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getDeletedSecretHandleResponse handles the GetDeletedSecret response. +func (client *Client) getDeletedSecretHandleResponse(resp *http.Response) (GetDeletedSecretResponse, error) { + result := GetDeletedSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretBundle); err != nil { + return GetDeletedSecretResponse{}, err + } + return result, nil +} + +// GetSecret - The GET operation is applicable to any secret stored in Azure Key Vault. This operation requires the secrets/get +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - version - The version of the secret. This URI fragment is optional. If not specified, the latest version of the secret +// is returned. +// - options - GetSecretOptions contains the optional parameters for the Client.GetSecret method. +func (client *Client) GetSecret(ctx context.Context, name string, version string, options *GetSecretOptions) (GetSecretResponse, error) { + req, err := client.getSecretCreateRequest(ctx, name, version, options) + if err != nil { + return GetSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetSecretResponse{}, runtime.NewResponseError(resp) + } + return client.getSecretHandleResponse(resp) +} + +// getSecretCreateRequest creates the GetSecret request. 
+func (client *Client) getSecretCreateRequest(ctx context.Context, name string, version string, options *GetSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/{secret-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{secret-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getSecretHandleResponse handles the GetSecret response. +func (client *Client) getSecretHandleResponse(resp *http.Response) (GetSecretResponse, error) { + result := GetSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return GetSecretResponse{}, err + } + return result, nil +} + +// NewListDeletedSecretsPager - The Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled +// for soft-delete. This operation requires the secrets/list permission. +// +// Generated from API version 7.4 +// - options - ListDeletedSecretsOptions contains the optional parameters for the Client.NewListDeletedSecretsPager method. +func (client *Client) NewListDeletedSecretsPager(options *ListDeletedSecretsOptions) *runtime.Pager[ListDeletedSecretsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListDeletedSecretsResponse]{ + More: func(page ListDeletedSecretsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedSecretsResponse) (ListDeletedSecretsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listDeletedSecretsCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListDeletedSecretsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListDeletedSecretsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListDeletedSecretsResponse{}, runtime.NewResponseError(resp) + } + return client.listDeletedSecretsHandleResponse(resp) + }, + }) +} + +// listDeletedSecretsCreateRequest creates the ListDeletedSecrets request. +func (client *Client) listDeletedSecretsCreateRequest(ctx context.Context, options *ListDeletedSecretsOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDeletedSecretsHandleResponse handles the ListDeletedSecrets response. 
+func (client *Client) listDeletedSecretsHandleResponse(resp *http.Response) (ListDeletedSecretsResponse, error) { + result := ListDeletedSecretsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretListResult); err != nil { + return ListDeletedSecretsResponse{}, err + } + return result, nil +} + +// NewListSecretVersionsPager - The full secret identifier and attributes are provided in the response. No values are returned +// for the secrets. This operations requires the secrets/list permission. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - ListSecretVersionsOptions contains the optional parameters for the Client.NewListSecretVersionsPager method. +func (client *Client) NewListSecretVersionsPager(name string, options *ListSecretVersionsOptions) *runtime.Pager[ListSecretVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListSecretVersionsResponse]{ + More: func(page ListSecretVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSecretVersionsResponse) (ListSecretVersionsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listSecretVersionsCreateRequest(ctx, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListSecretVersionsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListSecretVersionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListSecretVersionsResponse{}, runtime.NewResponseError(resp) + } + return client.listSecretVersionsHandleResponse(resp) + }, + }) +} + +// listSecretVersionsCreateRequest creates the ListSecretVersions request. +func (client *Client) listSecretVersionsCreateRequest(ctx context.Context, name string, options *ListSecretVersionsOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/versions" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretVersionsHandleResponse handles the ListSecretVersions response. +func (client *Client) listSecretVersionsHandleResponse(resp *http.Response) (ListSecretVersionsResponse, error) { + result := ListSecretVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretListResult); err != nil { + return ListSecretVersionsResponse{}, err + } + return result, nil +} + +// NewListSecretsPager - The Get Secrets operation is applicable to the entire vault. However, only the base secret identifier +// and its attributes are provided in the response. Individual secret versions are not listed in the +// response. This operation requires the secrets/list permission. +// +// Generated from API version 7.4 +// - options - ListSecretsOptions contains the optional parameters for the Client.NewListSecretsPager method. 
+func (client *Client) NewListSecretsPager(options *ListSecretsOptions) *runtime.Pager[ListSecretsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListSecretsResponse]{ + More: func(page ListSecretsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSecretsResponse) (ListSecretsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listSecretsCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListSecretsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListSecretsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListSecretsResponse{}, runtime.NewResponseError(resp) + } + return client.listSecretsHandleResponse(resp) + }, + }) +} + +// listSecretsCreateRequest creates the ListSecrets request. +func (client *Client) listSecretsCreateRequest(ctx context.Context, options *ListSecretsOptions) (*policy.Request, error) { + urlPath := "/secrets" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretsHandleResponse handles the ListSecrets response. +func (client *Client) listSecretsHandleResponse(resp *http.Response) (ListSecretsResponse, error) { + result := ListSecretsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretListResult); err != nil { + return ListSecretsResponse{}, err + } + return result, nil +} + +// PurgeDeletedSecret - The purge deleted secret operation removes the secret permanently, without the possibility of recovery. +// This operation can only be enabled on a soft-delete enabled vault. This operation requires the +// secrets/purge permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - PurgeDeletedSecretOptions contains the optional parameters for the Client.PurgeDeletedSecret method. +func (client *Client) PurgeDeletedSecret(ctx context.Context, name string, options *PurgeDeletedSecretOptions) (PurgeDeletedSecretResponse, error) { + req, err := client.purgeDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return PurgeDeletedSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PurgeDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return PurgeDeletedSecretResponse{}, runtime.NewResponseError(resp) + } + return PurgeDeletedSecretResponse{}, nil +} + +// purgeDeletedSecretCreateRequest creates the PurgeDeletedSecret request. 
+func (client *Client) purgeDeletedSecretCreateRequest(ctx context.Context, name string, options *PurgeDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RecoverDeletedSecret - Recovers the deleted secret in the specified vault. This operation can only be performed on a soft-delete +// enabled vault. This operation requires the secrets/recover permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the deleted secret. +// - options - RecoverDeletedSecretOptions contains the optional parameters for the Client.RecoverDeletedSecret method. +func (client *Client) RecoverDeletedSecret(ctx context.Context, name string, options *RecoverDeletedSecretOptions) (RecoverDeletedSecretResponse, error) { + req, err := client.recoverDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return RecoverDeletedSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RecoverDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RecoverDeletedSecretResponse{}, runtime.NewResponseError(resp) + } + return client.recoverDeletedSecretHandleResponse(resp) +} + +// recoverDeletedSecretCreateRequest creates the RecoverDeletedSecret request. +func (client *Client) recoverDeletedSecretCreateRequest(ctx context.Context, name string, options *RecoverDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}/recover" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// recoverDeletedSecretHandleResponse handles the RecoverDeletedSecret response. +func (client *Client) recoverDeletedSecretHandleResponse(resp *http.Response) (RecoverDeletedSecretResponse, error) { + result := RecoverDeletedSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return RecoverDeletedSecretResponse{}, err + } + return result, nil +} + +// RestoreSecret - Restores a backed up secret, and all its versions, to a vault. This operation requires the secrets/restore +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - parameters - The parameters to restore the secret. +// - options - RestoreSecretOptions contains the optional parameters for the Client.RestoreSecret method. 
+func (client *Client) RestoreSecret(ctx context.Context, parameters RestoreSecretParameters, options *RestoreSecretOptions) (RestoreSecretResponse, error) { + req, err := client.restoreSecretCreateRequest(ctx, parameters, options) + if err != nil { + return RestoreSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RestoreSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestoreSecretResponse{}, runtime.NewResponseError(resp) + } + return client.restoreSecretHandleResponse(resp) +} + +// restoreSecretCreateRequest creates the RestoreSecret request. +func (client *Client) restoreSecretCreateRequest(ctx context.Context, parameters RestoreSecretParameters, options *RestoreSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/restore" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// restoreSecretHandleResponse handles the RestoreSecret response. +func (client *Client) restoreSecretHandleResponse(resp *http.Response) (RestoreSecretResponse, error) { + result := RestoreSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return RestoreSecretResponse{}, err + } + return result, nil +} + +// SetSecret - The SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key Vault +// creates a new version of that secret. This operation requires the secrets/set permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. The value you provide may be copied globally for the purpose of running the service. +// The value provided should not include personally identifiable or sensitive information. +// - parameters - The parameters for setting the secret. +// - options - SetSecretOptions contains the optional parameters for the Client.SetSecret method. +func (client *Client) SetSecret(ctx context.Context, name string, parameters SetSecretParameters, options *SetSecretOptions) (SetSecretResponse, error) { + req, err := client.setSecretCreateRequest(ctx, name, parameters, options) + if err != nil { + return SetSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SetSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SetSecretResponse{}, runtime.NewResponseError(resp) + } + return client.setSecretHandleResponse(resp) +} + +// setSecretCreateRequest creates the SetSecret request. 
+func (client *Client) setSecretCreateRequest(ctx context.Context, name string, parameters SetSecretParameters, options *SetSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// setSecretHandleResponse handles the SetSecret response. +func (client *Client) setSecretHandleResponse(resp *http.Response) (SetSecretResponse, error) { + result := SetSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return SetSecretResponse{}, err + } + return result, nil +} + +// UpdateSecret - The UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not +// specified in the request are left unchanged. The value of a secret itself cannot be changed. +// This operation requires the secrets/set permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - version - The version of the secret. +// - parameters - The parameters for update secret operation. +// - options - UpdateSecretOptions contains the optional parameters for the Client.UpdateSecret method. +func (client *Client) UpdateSecret(ctx context.Context, name string, version string, parameters UpdateSecretParameters, options *UpdateSecretOptions) (UpdateSecretResponse, error) { + req, err := client.updateSecretCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UpdateSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UpdateSecretResponse{}, runtime.NewResponseError(resp) + } + return client.updateSecretHandleResponse(resp) +} + +// updateSecretCreateRequest creates the UpdateSecret request. +func (client *Client) updateSecretCreateRequest(ctx context.Context, name string, version string, parameters UpdateSecretParameters, options *UpdateSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/{secret-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{secret-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateSecretHandleResponse handles the UpdateSecret response. 
+func (client *Client) updateSecretHandleResponse(resp *http.Response) (UpdateSecretResponse, error) { + result := UpdateSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return UpdateSecretResponse{}, err + } + return result, nil +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go new file mode 100644 index 000000000000..d897d67a1241 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go @@ -0,0 +1,63 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +// DeletionRecoveryLevel - Reflects the deletion recovery level currently in effect for secrets in the current vault. If it +// contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the +// system can purge the secret, at the end of the retention interval. +type DeletionRecoveryLevel string + +const ( + // DeletionRecoveryLevelCustomizedRecoverable - Denotes a vault state in which deletion is recoverable without the possibility + // for immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level guarantees the recoverability + // of the deleted entity during the retention interval and while the subscription is still available. + DeletionRecoveryLevelCustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable" + // DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription - Denotes a vault and subscription state in which deletion + // is recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the subscription itself cannot + // be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. This level guarantees the recoverability of the deleted + // entity during the retention interval, and also reflects the fact that the subscription itself cannot be cancelled. + DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription" + // DeletionRecoveryLevelCustomizedRecoverablePurgeable - Denotes a vault state in which deletion is recoverable, and which + // also permits immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This level guarantees + // the recoverability of the deleted entity during the retention interval, unless a Purge operation is requested, or the subscription + // is cancelled. + DeletionRecoveryLevelCustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable" + // DeletionRecoveryLevelPurgeable - Denotes a vault state in which deletion is an irreversible operation, without the possibility + // for recovery. This level corresponds to no protection being available against a Delete operation; the data is irretrievably + // lost upon accepting a Delete operation at the entity level or higher (vault, resource group, subscription etc.) 
+ DeletionRecoveryLevelPurgeable DeletionRecoveryLevel = "Purgeable" + // DeletionRecoveryLevelRecoverable - Denotes a vault state in which deletion is recoverable without the possibility for immediate + // and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity during the retention + // interval(90 days) and while the subscription is still available. System wil permanently delete it after 90 days, if not + // recovered + DeletionRecoveryLevelRecoverable DeletionRecoveryLevel = "Recoverable" + // DeletionRecoveryLevelRecoverableProtectedSubscription - Denotes a vault and subscription state in which deletion is recoverable + // within retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and in which the subscription + // itself cannot be permanently canceled. System wil permanently delete it after 90 days, if not recovered + DeletionRecoveryLevelRecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" + // DeletionRecoveryLevelRecoverablePurgeable - Denotes a vault state in which deletion is recoverable, and which also permits + // immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity during the + // retention interval (90 days), unless a Purge operation is requested, or the subscription is cancelled. System wil permanently + // delete it after 90 days, if not recovered + DeletionRecoveryLevelRecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" +) + +// PossibleDeletionRecoveryLevelValues returns the possible values for the DeletionRecoveryLevel const type. +func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { + return []DeletionRecoveryLevel{ + DeletionRecoveryLevelCustomizedRecoverable, + DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription, + DeletionRecoveryLevelCustomizedRecoverablePurgeable, + DeletionRecoveryLevelPurgeable, + DeletionRecoveryLevelRecoverable, + DeletionRecoveryLevelRecoverableProtectedSubscription, + DeletionRecoveryLevelRecoverablePurgeable, + } +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go new file mode 100644 index 000000000000..04500bd08042 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go @@ -0,0 +1,63 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azsecrets + +// this file contains handwritten additions to the generated code + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal" +) + +// ClientOptions contains optional settings for Client. +type ClientOptions struct { + azcore.ClientOptions + + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain. + // See https://aka.ms/azsdk/blog/vault-uri for more information. 
+ DisableChallengeResourceVerification bool +} + +// NewClient creates a client that accesses a Key Vault's secrets. You should validate that +// vaultURL references a valid Key Vault. See https://aka.ms/azsdk/blog/vault-uri for details. +func NewClient(vaultURL string, credential azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + authPolicy := internal.NewKeyVaultChallengePolicy( + credential, + &internal.KeyVaultChallengePolicyOptions{ + DisableChallengeResourceVerification: options.DisableChallengeResourceVerification, + }, + ) + azcoreClient, err := azcore.NewClient("azsecrets.Client", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions) + if err != nil { + return nil, err + } + return &Client{endpoint: vaultURL, internal: azcoreClient}, nil +} + +// ID is a secret's unique ID, containing its name and version. +type ID string + +// Name of the secret. +func (i *ID) Name() string { + _, name, _ := internal.ParseID((*string)(i)) + return *name +} + +// Version of the secret. This returns an empty string when the ID contains no version. +func (i *ID) Version() string { + _, _, version := internal.ParseID((*string)(i)) + if version == nil { + return "" + } + return *version +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go new file mode 100644 index 000000000000..87be9ab52cc6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go @@ -0,0 +1,268 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +import "time" + +// BackupSecretResult - The backup secret result, containing the backup blob. +type BackupSecretResult struct { + // READ-ONLY; The backup blob containing the backed up secret. + Value []byte `json:"value,omitempty" azure:"ro"` +} + +// BackupSecretOptions contains the optional parameters for the Client.BackupSecret method. +type BackupSecretOptions struct { + // placeholder for future optional parameters +} + +// DeleteSecretOptions contains the optional parameters for the Client.DeleteSecret method. +type DeleteSecretOptions struct { + // placeholder for future optional parameters +} + +// GetDeletedSecretOptions contains the optional parameters for the Client.GetDeletedSecret method. +type GetDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// GetSecretOptions contains the optional parameters for the Client.GetSecret method. +type GetSecretOptions struct { + // placeholder for future optional parameters +} + +// ListDeletedSecretsOptions contains the optional parameters for the Client.NewListDeletedSecretsPager method. +type ListDeletedSecretsOptions struct { + // Maximum number of results to return in a page. If not specified the service will return up to 25 results. + MaxResults *int32 +} + +// ListSecretVersionsOptions contains the optional parameters for the Client.NewListSecretVersionsPager method. 
+type ListSecretVersionsOptions struct { + // Maximum number of results to return in a page. If not specified, the service will return up to 25 results. + MaxResults *int32 +} + +// ListSecretsOptions contains the optional parameters for the Client.NewListSecretsPager method. +type ListSecretsOptions struct { + // Maximum number of results to return in a page. If not specified, the service will return up to 25 results. + MaxResults *int32 +} + +// PurgeDeletedSecretOptions contains the optional parameters for the Client.PurgeDeletedSecret method. +type PurgeDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// RecoverDeletedSecretOptions contains the optional parameters for the Client.RecoverDeletedSecret method. +type RecoverDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// RestoreSecretOptions contains the optional parameters for the Client.RestoreSecret method. +type RestoreSecretOptions struct { + // placeholder for future optional parameters +} + +// SetSecretOptions contains the optional parameters for the Client.SetSecret method. +type SetSecretOptions struct { + // placeholder for future optional parameters +} + +// UpdateSecretOptions contains the optional parameters for the Client.UpdateSecret method. +type UpdateSecretOptions struct { + // placeholder for future optional parameters +} + +// DeletedSecretBundle - A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on +// when it will be purged. +type DeletedSecretBundle struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + + // The secret id. + ID *ID `json:"id,omitempty"` + + // The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string `json:"recoveryId,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // The secret value. + Value *string `json:"value,omitempty"` + + // READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"` + + // READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV + // certificate. + Kid *string `json:"kid,omitempty" azure:"ro"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed + // will be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` + + // READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"` +} + +// DeletedSecretItem - The deleted secret item containing metadata about the deleted secret. +type DeletedSecretItem struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // Secret identifier. + ID *ID `json:"id,omitempty"` + + // The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string `json:"recoveryId,omitempty"` + + // Application specific metadata in the form of key-value pairs. 
+ Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed + // will be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` + + // READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"` +} + +// DeletedSecretListResult - The deleted secret list result +type DeletedSecretListResult struct { + // READ-ONLY; The URL to get the next set of deleted secrets. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page + // of deleted secrets + Value []*DeletedSecretItem `json:"value,omitempty" azure:"ro"` +} + +// RestoreSecretParameters - The secret restore parameters. +type RestoreSecretParameters struct { + // REQUIRED; The backup blob associated with a secret bundle. + SecretBundleBackup []byte `json:"value,omitempty"` +} + +// SecretAttributes - The secret management attributes. +type SecretAttributes struct { + // Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Expiry date in UTC. + Expires *time.Time `json:"exp,omitempty"` + + // Not before date in UTC. + NotBefore *time.Time `json:"nbf,omitempty"` + + // READ-ONLY; Creation time in UTC. + Created *time.Time `json:"created,omitempty" azure:"ro"` + + // READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty" azure:"ro"` + + // READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', + // the secret can be permanently deleted by a privileged user; otherwise, only the + // system can purge the secret, at the end of the retention interval. + RecoveryLevel *DeletionRecoveryLevel `json:"recoveryLevel,omitempty" azure:"ro"` + + // READ-ONLY; Last updated time in UTC. + Updated *time.Time `json:"updated,omitempty" azure:"ro"` +} + +// SecretBundle - A secret consisting of a value, id and its attributes. +type SecretBundle struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + + // The secret id. + ID *ID `json:"id,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // The secret value. + Value *string `json:"value,omitempty"` + + // READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV + // certificate. + Kid *string `json:"kid,omitempty" azure:"ro"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed + // will be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// SecretItem - The secret item containing secret metadata. +type SecretItem struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // Secret identifier. 
+ ID *ID `json:"id,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed + // will be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// SecretListResult - The secret list result. +type SecretListResult struct { + // READ-ONLY; The URL to get the next set of secrets. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. + Value []*SecretItem `json:"value,omitempty" azure:"ro"` +} + +// SetSecretParameters - The secret set parameters. +type SetSecretParameters struct { + // REQUIRED; The value of the secret. + Value *string `json:"value,omitempty"` + + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // The secret management attributes. + SecretAttributes *SecretAttributes `json:"attributes,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// UpdateSecretParameters - The secret update parameters. +type UpdateSecretParameters struct { + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // The secret management attributes. + SecretAttributes *SecretAttributes `json:"attributes,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go new file mode 100644 index 000000000000..72d55412716b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go @@ -0,0 +1,501 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BackupSecretResult. +func (b BackupSecretResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", b.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BackupSecretResult. 
+func (b *BackupSecretResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretBundle. +func (d DeletedSecretBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populate(objectMap, "contentType", d.ContentType) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "id", d.ID) + populate(objectMap, "kid", d.Kid) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretBundle. +func (d *DeletedSecretBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &d.ContentType) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &d.ID) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "Kid", &d.Kid) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretItem. +func (d DeletedSecretItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populate(objectMap, "contentType", d.ContentType) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "id", d.ID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretItem. 
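+// The deletedDate and scheduledPurgeDate fields are Unix-epoch timestamps on the wire and are decoded via unpopulateTimeUnix.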
+func (d *DeletedSecretItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &d.ContentType) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &d.ID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretListResult. +func (d DeletedSecretListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", d.NextLink) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretListResult. +func (d *DeletedSecretListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &d.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RestoreSecretParameters. +func (r RestoreSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.SecretBundleBackup, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RestoreSecretParameters. +func (r *RestoreSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.SecretBundleBackup, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretAttributes. 
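+// The created, exp, nbf and updated fields are written as Unix epoch seconds via populateTimeUnix.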
+func (s SecretAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", s.Created) + populate(objectMap, "enabled", s.Enabled) + populateTimeUnix(objectMap, "exp", s.Expires) + populateTimeUnix(objectMap, "nbf", s.NotBefore) + populate(objectMap, "recoverableDays", s.RecoverableDays) + populate(objectMap, "recoveryLevel", s.RecoveryLevel) + populateTimeUnix(objectMap, "updated", s.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretAttributes. +func (s *SecretAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &s.Created) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &s.Enabled) + delete(rawMsg, key) + case "exp": + err = unpopulateTimeUnix(val, "Expires", &s.Expires) + delete(rawMsg, key) + case "nbf": + err = unpopulateTimeUnix(val, "NotBefore", &s.NotBefore) + delete(rawMsg, key) + case "recoverableDays": + err = unpopulate(val, "RecoverableDays", &s.RecoverableDays) + delete(rawMsg, key) + case "recoveryLevel": + err = unpopulate(val, "RecoveryLevel", &s.RecoveryLevel) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &s.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretBundle. +func (s SecretBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", s.Attributes) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "id", s.ID) + populate(objectMap, "kid", s.Kid) + populate(objectMap, "managed", s.Managed) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretBundle. +func (s *SecretBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &s.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "Kid", &s.Kid) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &s.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretItem. 
+func (s SecretItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", s.Attributes) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "id", s.ID) + populate(objectMap, "managed", s.Managed) + populate(objectMap, "tags", s.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretItem. +func (s *SecretItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &s.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &s.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretListResult. +func (s SecretListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", s.NextLink) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretListResult. +func (s *SecretListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &s.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SetSecretParameters. +func (s SetSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "attributes", s.SecretAttributes) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SetSecretParameters. +func (s *SetSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "SecretAttributes", &s.SecretAttributes) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UpdateSecretParameters. 
+func (u UpdateSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", u.ContentType) + populate(objectMap, "attributes", u.SecretAttributes) + populate(objectMap, "tags", u.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UpdateSecretParameters. +func (u *UpdateSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &u.ContentType) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "SecretAttributes", &u.SecretAttributes) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &u.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { + if azcore.IsNullValue(b) { + m[k] = nil + } else if len(b) == 0 { + return + } else { + m[k] = runtime.EncodeByteArray(b, f) + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go new file mode 100644 index 000000000000..517acaabb98b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +// BackupSecretResponse contains the response from method Client.BackupSecret. +type BackupSecretResponse struct { + BackupSecretResult +} + +// DeleteSecretResponse contains the response from method Client.DeleteSecret. +type DeleteSecretResponse struct { + DeletedSecretBundle +} + +// GetDeletedSecretResponse contains the response from method Client.GetDeletedSecret. +type GetDeletedSecretResponse struct { + DeletedSecretBundle +} + +// GetSecretResponse contains the response from method Client.GetSecret. +type GetSecretResponse struct { + SecretBundle +} + +// ListDeletedSecretsResponse contains the response from method Client.NewListDeletedSecretsPager. +type ListDeletedSecretsResponse struct { + DeletedSecretListResult +} + +// ListSecretVersionsResponse contains the response from method Client.NewListSecretVersionsPager. +type ListSecretVersionsResponse struct { + SecretListResult +} + +// ListSecretsResponse contains the response from method Client.NewListSecretsPager. 
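+// Embeds SecretListResult; its NextLink points to the next page of secrets consumed by the pager.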
+type ListSecretsResponse struct { + SecretListResult +} + +// PurgeDeletedSecretResponse contains the response from method Client.PurgeDeletedSecret. +type PurgeDeletedSecretResponse struct { + // placeholder for future response values +} + +// RecoverDeletedSecretResponse contains the response from method Client.RecoverDeletedSecret. +type RecoverDeletedSecretResponse struct { + SecretBundle +} + +// RestoreSecretResponse contains the response from method Client.RestoreSecret. +type RestoreSecretResponse struct { + SecretBundle +} + +// SetSecretResponse contains the response from method Client.SetSecret. +type SetSecretResponse struct { + SecretBundle +} + +// UpdateSecretResponse contains the response from method Client.UpdateSecret. +type UpdateSecretResponse struct { + SecretBundle +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json new file mode 100644 index 000000000000..20f726f33227 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json @@ -0,0 +1,331 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "The base resource name." + } + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The client OID to grant access to test resources." + } + }, + "provisionerApplicationOid": { + "type": "string", + "metadata": { + "description": "The provisioner OID to grant access to test resources." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "The location of the resource. By default, this is the same as the resource group." + } + }, + "hsmLocation": { + "type": "string", + "defaultValue": "southcentralus", + "allowedValues": [ + "australiacentral", + "canadacentral", + "centralus", + "eastasia", + "eastus2", + "koreacentral", + "northeurope", + "southafricanorth", + "southcentralus", + "southeastasia", + "switzerlandnorth", + "uksouth", + "westeurope", + "westus" + ], + "metadata": { + "description": "The location of the Managed HSM. By default, this is 'southcentralus'." + } + }, + "enableHsm": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "Whether to enable deployment of Managed HSM. The default is false." + } + }, + "keyVaultSku": { + "type": "string", + "defaultValue": "premium", + "metadata": { + "description": "Key Vault SKU to deploy. The default is 'premium'" + } + }, + "attestationImage": { + "type": "string", + "defaultValue": "keyvault-mock-attestation:latest", + "metadata": { + "description": "The container image name and tag to use for the attestation mock service." 
+ } + } + }, + "variables": { + "attestationFarm": "[concat(parameters('baseName'), 'farm')]", + "attestationSite": "[concat(parameters('baseName'), 'site')]", + "attestationUri": "[concat('DOCKER|azsdkengsys.azurecr.io/', parameters('attestationImage'))]", + "kvApiVersion": "2019-09-01", + "kvName": "[parameters('baseName')]", + "hsmApiVersion": "2021-04-01-preview", + "hsmName": "[concat(parameters('baseName'), 'hsm')]", + "mgmtApiVersion": "2019-04-01", + "blobContainerName": "backup", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "encryption": { + "services": { + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "[variables('kvApiVersion')]", + "name": "[variables('kvName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "family": "A", + "name": "[parameters('keyVaultSku')]" + }, + "tenantId": "[parameters('tenantId')]", + "accessPolicies": [ + { + "tenantId": "[parameters('tenantId')]", + "objectId": "[parameters('testApplicationOid')]", + "permissions": { + "keys": [ + "backup", + "create", + "decrypt", + "delete", + "encrypt", + "get", + "import", + "list", + "purge", + "recover", + "release", + "restore", + "rotate", + "sign", + "unwrapKey", + "update", + "verify", + "wrapKey" + ], + "secrets": [ + "backup", + "delete", + "get", + "list", + "purge", + "recover", + "restore", + "set" + ], + "certificates": [ + "backup", + "create", + "delete", + "deleteissuers", + "get", + "getissuers", + "import", + "list", + "listissuers", + "managecontacts", + "manageissuers", + "purge", + "recover", + "restore", + "setissuers", + "update" + ] + } + } + ], + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7 + } + }, + { + "type": "Microsoft.KeyVault/managedHSMs", + "apiVersion": "[variables('hsmApiVersion')]", + "name": "[variables('hsmName')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('hsmLocation')]", + "sku": { + "family": "B", + "name": "Standard_B1" + }, + "properties": { + "tenantId": "[parameters('tenantId')]", + "initialAdminObjectIds": "[union(array(parameters('testApplicationOid')), array(parameters('provisionerApplicationOid')))]", + "enablePurgeProtection": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7, + "publicNetworkAccess": "Enabled", + "networkAcls": "[variables('networkAcls')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "properties": { + "cors": { + "corsRules": [] + }, + 
"deleteRetentionPolicy": { + "enabled": false + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default/', variables('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', variables('primaryAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "properties": { + "publicAccess": "None" + } + }, + { + + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2020-12-01", + "name": "[variables('attestationFarm')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('location')]", + "kind": "linux", + "sku": { + "name": "B1" + }, + "properties": { + "reserved": true + } + }, + { + + "type": "Microsoft.Web/sites", + "apiVersion": "2020-12-01", + "name": "[variables('attestationSite')]", + "condition": "[parameters('enableHsm')]", + "dependsOn": [ + "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]" + ], + "location": "[parameters('location')]", + "properties": { + "httpsOnly": true, + "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]", + "siteConfig": { + "name": "[variables('attestationSite')]", + "alwaysOn": true, + "linuxFxVersion": "[variables('attestationUri')]", + "appSettings": [ + { + "name": "WEBSITES_ENABLE_APP_SERVICE_STORAGE", + "value": "false" + } + ] + } + } + } + ], + "outputs": { + "AZURE_KEYVAULT_URL": { + "type": "string", + "value": "[reference(variables('kvName')).vaultUri]" + }, + "AZURE_MANAGEDHSM_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[reference(variables('hsmName')).hsmUri]" + }, + "KEYVAULT_SKU": { + "type": "string", + "value": "[reference(parameters('baseName')).sku.name]" + }, + "CLIENT_OBJECTID": { + "type": "string", + "value": "[parameters('testApplicationOid')]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "BLOB_PRIMARY_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(variables('primaryAccountName'), variables('mgmtApiVersion')).keys[0].value]" + }, + "BLOB_CONTAINER_NAME" : { + "type": "string", + "value": "[variables('blobContainerName')]" + }, + "AZURE_KEYVAULT_ATTESTATION_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[format('https://{0}/', reference(variables('attestationSite')).defaultHostName)]" + } + } +} \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go new file mode 100644 index 000000000000..ed8ce0f9dd23 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go @@ -0,0 +1,62 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package azsecrets + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "strings" + "time" +) + +type timeUnix time.Time + +func (t timeUnix) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Unix()) +} + +func (t *timeUnix) UnmarshalJSON(data []byte) error { + var seconds int64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + *t = timeUnix(time.Unix(seconds, 0)) + return nil +} + +func (t timeUnix) String() string { + return fmt.Sprintf("%d", time.Time(t).Unix()) +} + +func populateTimeUnix(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeUnix)(t) +} + +func unpopulateTimeUnix(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeUnix + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go new file mode 100644 index 000000000000..885158bcbe94 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azsecrets + +const ( + moduleName = "azsecrets" + version = "v0.12.0" +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md new file mode 100644 index 000000000000..95d77dc9521c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md @@ -0,0 +1,54 @@ +# Release History + +## 0.7.1 (2022-11-14) + +### Bugs Fixed +* `KeyVaultChallengePolicy` uses incorrect authentication scope when challenge verification is disabled + +## 0.7.0 (2022-09-20) + +### Breaking Changes +* Added `*KeyVaultChallengePolicyOptions` parameter to `NewKeyVaultChallengePolicy` + +## 0.6.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. See https://aka.ms/azsdk/blog/vault-uri for more information. +* `ParseID()` no longer appends a trailing slash to vault URLs + +## 0.5.0 (2022-05-12) + +### Breaking Changes +* Removed `ExpiringResource` and its dependencies in favor of shared implementation from `internal/temporal`. + +### Other Changes +* Updated to latest versions of `azcore` and `internal`. + +## 0.4.0 (2022-04-22) + +### Breaking Changes +* Updated `ExpiringResource` and its dependent types to use generics. + +### Other Changes +* Remove reference to `TokenRequestOptions.TenantID` as it's been removed and wasn't working anyways. 
+ +## 0.3.0 (2022-04-04) + +### Features Added +* Adds the `ParseKeyvaultID` function to parse an ID into the Key Vault URL, item name, and item version + +### Breaking Changes +* Updates to azcore v0.23.0 + +## 0.2.1 (2022-01-31) + +### Bugs Fixed +* Avoid retries on terminal failures (#16932) + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug with Managed HSMs that prevented correctly authorizing requests. + +## 0.1.0 (2021-11-09) +* This is the initial release of the `internal` library for KeyVault diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md new file mode 100644 index 000000000000..bd4826705d5f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md @@ -0,0 +1,23 @@ +# Key Vault Internal Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal) + +This module contains shared code for all the Key Vault SDKs, mainly the challenge authentication policy. + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. 
+ +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go new file mode 100644 index 000000000000..4cc1e429a645 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go @@ -0,0 +1,255 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +const ( + headerAuthorization = "Authorization" + challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` + bearerHeader = "Bearer " +) + +type KeyVaultChallengePolicyOptions struct { + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain + DisableChallengeResourceVerification bool +} + +type KeyVaultChallengePolicy struct { + // mainResource is the resource to be retrieved using the tenant specified in the credential + mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + cred azcore.TokenCredential + scope *string + tenantID *string + verifyChallengeResource bool +} + +func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChallengePolicyOptions) *KeyVaultChallengePolicy { + if opts == nil { + opts = &KeyVaultChallengePolicyOptions{} + } + return &KeyVaultChallengePolicy{ + cred: cred, + mainResource: temporal.NewResource(acquire), + verifyChallengeResource: !opts.DisableChallengeResourceVerification, + } +} + +func (k *KeyVaultChallengePolicy) Do(req *policy.Request) (*http.Response, error) { + as := acquiringResourceState{ + p: k, + req: req, + } + + if k.scope == nil || k.tenantID == nil { + // First request, get both to get the token + challengeReq, err := k.getChallengeRequest(*req) + if err != nil { + return nil, err + } + + resp, err := challengeReq.Next() + if err != nil { + return nil, err + } + + if resp.StatusCode > 399 && resp.StatusCode != http.StatusUnauthorized { + // the request failed for some other reason, don't try any further + return resp, nil + } + err = k.findScopeAndTenant(resp, req.Raw()) + if err != nil { + return nil, err + } + } + + tk, err := k.mainResource.Get(as) + if err != nil { + return nil, err + } + + 
req.Raw().Header.Set( + headerAuthorization, + fmt.Sprintf("%s%s", bearerHeader, tk.Token), + ) + + // send a copy of the request + cloneReq := req.Clone(req.Raw().Context()) + resp, cloneReqErr := cloneReq.Next() + if cloneReqErr != nil { + return nil, cloneReqErr + } + + // If it fails and has a 401, try it with a new token + if resp.StatusCode == 401 { + // Force a new token + k.mainResource.Expire() + + // Find the scope and tenant again in case they have changed + err := k.findScopeAndTenant(resp, req.Raw()) + if err != nil { + // Error parsing challenge, doomed to fail. Return + return resp, cloneReqErr + } + + tk, err := k.mainResource.Get(as) + if err != nil { + return resp, err + } + + req.Raw().Header.Set( + headerAuthorization, + bearerHeader+tk.Token, + ) + + // send the original request now + return req.Next() + } + + return resp, err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000 +func parseTenant(url string) *string { + if url == "" { + return to.Ptr("") + } + parts := strings.Split(url, "/") + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return &tenant +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +var _ errorinfo.NonRetriable = (*challengePolicyError)(nil) + +// sets the k.scope and k.tenantID from the WWW-Authenticate header +func (k *KeyVaultChallengePolicy) findScopeAndTenant(resp *http.Response, req *http.Request) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization=\"\" resource=\"\"" OR + // "Bearer authorization=\"\" scope=\"\" resource=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + k.tenantID = parseTenant(vals["authorization"]) + scope := "" + if v, ok := vals["scope"]; ok { + scope = v + } else if v, ok := vals["resource"]; ok { + scope = v + } + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + if k.verifyChallengeResource { + // the challenge resource's host must match the requested vault's host + parsed, err := url.Parse(scope) + if err != nil { + return &challengePolicyError{err: fmt.Errorf(`invalid challenge resource "%s": %v`, scope, err)} + } + if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) { + return &challengePolicyError{err: fmt.Errorf(challengeMatchError, scope)} + } + } + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + k.scope = &scope + return nil +} + +func (k KeyVaultChallengePolicy) getChallengeRequest(orig policy.Request) (*policy.Request, error) { + req, err := runtime.NewRequest(orig.Raw().Context(), orig.Raw().Method, orig.Raw().URL.String()) + if err != nil { + return nil, &challengePolicyError{err: err} + } + + req.Raw().Header = orig.Raw().Header 
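+ // Zero out the body and Content-Length so this unauthenticated probe only elicits the
+ // WWW-Authenticate challenge from the service without transmitting the request payload.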
+ req.Raw().Header.Set("Content-Length", "0") + req.Raw().ContentLength = 0 + + copied := orig.Clone(orig.Raw().Context()) + copied.Raw().Body = req.Body() + copied.Raw().ContentLength = 0 + copied.Raw().Header.Set("Content-Length", "0") + err = copied.SetBody(streaming.NopCloser(bytes.NewReader([]byte{})), "application/json") + if err != nil { + return nil, &challengePolicyError{err: err} + } + copied.Raw().Header.Del("Content-Type") + + return copied, err +} + +type acquiringResourceState struct { + req *policy.Request + p *KeyVaultChallengePolicy +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken( + state.req.Raw().Context(), + policy.TokenRequestOptions{ + Scopes: []string{*state.p.scope}, + }, + ) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml new file mode 100644 index 000000000000..d72c650135e4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/internal + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'keyvault/internal' + RunLiveTests: false diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go new file mode 100644 index 000000000000..cd94eb0d834c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go @@ -0,0 +1,11 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +const ( + version = "v0.7.1" //nolint +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go new file mode 100644 index 000000000000..d8f93492f51d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package internal diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go new file mode 100644 index 000000000000..8511832d27c0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +package internal + +import ( + "fmt" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// ParseID parses "https://myvaultname.vault.azure.net/keys/key1053998307/b86c2e6ad9054f4abf69cc185b99aa60" +// into "https://myvaultname.managedhsm.azure.net/", "key1053998307", and "b86c2e6ad9054f4abf69cc185b99aa60" +func ParseID(id *string) (*string, *string, *string) { + if id == nil { + return nil, nil, nil + } + parsed, err := url.Parse(*id) + if err != nil { + return nil, nil, nil + } + + url := fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host) + split := strings.Split(strings.TrimPrefix(parsed.Path, "/"), "/") + if len(split) < 3 { + if len(split) == 2 { + return &url, to.Ptr(split[1]), nil + } + return &url, nil, nil + } + + return &url, to.Ptr(split[1]), to.Ptr(split[2]) +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md index b19244d4651e..9aeb8a37e965 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md @@ -1,5 +1,19 @@ # Release History +## 5.6.0 (2024-03-22) +### Features Added + +- New field `VirtualMachineID` in struct `GalleryArtifactVersionFullSource` + + +## 5.5.0 (2024-01-26) +### Features Added + +- New value `DiskSecurityTypesConfidentialVMNonPersistedTPM` added to enum type `DiskSecurityTypes` +- New enum type `ProvisionedBandwidthCopyOption` with values `ProvisionedBandwidthCopyOptionEnhanced`, `ProvisionedBandwidthCopyOptionNone` +- New field `ProvisionedBandwidthCopySpeed` in struct `CreationData` + + ## 5.4.0 (2023-12-22) ### Features Added diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md index 7c8d1e3e1389..c4bf8ab344eb 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md @@ -57,7 +57,7 @@ clientFactory, err := armcompute.NewClientFactory(, cred, &opti A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using client factory. 
```go -client := clientFactory.NewLogAnalyticsClient() +client := clientFactory.NewAvailabilitySetsClient() ``` ## Fakes diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json index 1feae4c5caa2..9a0140774f2d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_323718962d" + "Tag": "go/resourcemanager/compute/armcompute_79d4095593" } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md index 9d88c8a6d6d4..71b871d52495 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/e4009d2f8d3bf0271757e522c7d1c1997e193d44/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/e4009d2f8d3bf0271757e522c7d1c1997e193d44/specification/compute/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 5.4.0 -tag: package-2023-09-01 +module-version: 5.6.0 +tag: package-2023-07-03 ``` diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go index 3b568f54ae99..55572022a97f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go @@ -398,10 +398,10 @@ func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go index 96e32e9668f9..cc26cd0d4420 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go @@ -286,10 +286,10 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -345,10 +345,10 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml index 084ed1b49e83..c93d36fd4ccc 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml @@ -21,8 +21,8 @@ pr: include: - sdk/resourcemanager/compute/armcompute/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: IncludeRelease: true ServiceDirectory: 'resourcemanager/compute/armcompute' diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go index b8600b614a39..dab27c87cb64 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go @@ -17,8 +17,7 @@ import ( // Don't use this type directly, use NewClientFactory instead. type ClientFactory struct { subscriptionID string - credential azcore.TokenCredential - options *arm.ClientOptions + internal *arm.Client } // NewClientFactory creates a new instance of ClientFactory with the specified values. @@ -28,306 +27,403 @@ type ClientFactory struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { - _, err := arm.NewClient(moduleName, moduleVersion, credential, options) + internal, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } return &ClientFactory{ - subscriptionID: subscriptionID, credential: credential, - options: options.Clone(), + subscriptionID: subscriptionID, + internal: internal, }, nil } // NewAvailabilitySetsClient creates a new instance of AvailabilitySetsClient. func (c *ClientFactory) NewAvailabilitySetsClient() *AvailabilitySetsClient { - subClient, _ := NewAvailabilitySetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &AvailabilitySetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCapacityReservationGroupsClient creates a new instance of CapacityReservationGroupsClient. func (c *ClientFactory) NewCapacityReservationGroupsClient() *CapacityReservationGroupsClient { - subClient, _ := NewCapacityReservationGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CapacityReservationGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCapacityReservationsClient creates a new instance of CapacityReservationsClient. func (c *ClientFactory) NewCapacityReservationsClient() *CapacityReservationsClient { - subClient, _ := NewCapacityReservationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CapacityReservationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceOperatingSystemsClient creates a new instance of CloudServiceOperatingSystemsClient. func (c *ClientFactory) NewCloudServiceOperatingSystemsClient() *CloudServiceOperatingSystemsClient { - subClient, _ := NewCloudServiceOperatingSystemsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceOperatingSystemsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceRoleInstancesClient creates a new instance of CloudServiceRoleInstancesClient. func (c *ClientFactory) NewCloudServiceRoleInstancesClient() *CloudServiceRoleInstancesClient { - subClient, _ := NewCloudServiceRoleInstancesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceRoleInstancesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceRolesClient creates a new instance of CloudServiceRolesClient. func (c *ClientFactory) NewCloudServiceRolesClient() *CloudServiceRolesClient { - subClient, _ := NewCloudServiceRolesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceRolesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServicesClient creates a new instance of CloudServicesClient. func (c *ClientFactory) NewCloudServicesClient() *CloudServicesClient { - subClient, _ := NewCloudServicesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServicesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServicesUpdateDomainClient creates a new instance of CloudServicesUpdateDomainClient. 
func (c *ClientFactory) NewCloudServicesUpdateDomainClient() *CloudServicesUpdateDomainClient { - subClient, _ := NewCloudServicesUpdateDomainClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServicesUpdateDomainClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleriesClient creates a new instance of CommunityGalleriesClient. func (c *ClientFactory) NewCommunityGalleriesClient() *CommunityGalleriesClient { - subClient, _ := NewCommunityGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleryImageVersionsClient creates a new instance of CommunityGalleryImageVersionsClient. func (c *ClientFactory) NewCommunityGalleryImageVersionsClient() *CommunityGalleryImageVersionsClient { - subClient, _ := NewCommunityGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleryImagesClient creates a new instance of CommunityGalleryImagesClient. func (c *ClientFactory) NewCommunityGalleryImagesClient() *CommunityGalleryImagesClient { - subClient, _ := NewCommunityGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDedicatedHostGroupsClient creates a new instance of DedicatedHostGroupsClient. func (c *ClientFactory) NewDedicatedHostGroupsClient() *DedicatedHostGroupsClient { - subClient, _ := NewDedicatedHostGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DedicatedHostGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDedicatedHostsClient creates a new instance of DedicatedHostsClient. func (c *ClientFactory) NewDedicatedHostsClient() *DedicatedHostsClient { - subClient, _ := NewDedicatedHostsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DedicatedHostsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskAccessesClient creates a new instance of DiskAccessesClient. func (c *ClientFactory) NewDiskAccessesClient() *DiskAccessesClient { - subClient, _ := NewDiskAccessesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskAccessesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskEncryptionSetsClient creates a new instance of DiskEncryptionSetsClient. func (c *ClientFactory) NewDiskEncryptionSetsClient() *DiskEncryptionSetsClient { - subClient, _ := NewDiskEncryptionSetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskEncryptionSetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskRestorePointClient creates a new instance of DiskRestorePointClient. func (c *ClientFactory) NewDiskRestorePointClient() *DiskRestorePointClient { - subClient, _ := NewDiskRestorePointClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskRestorePointClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDisksClient creates a new instance of DisksClient. 
func (c *ClientFactory) NewDisksClient() *DisksClient { - subClient, _ := NewDisksClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DisksClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleriesClient creates a new instance of GalleriesClient. func (c *ClientFactory) NewGalleriesClient() *GalleriesClient { - subClient, _ := NewGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryApplicationVersionsClient creates a new instance of GalleryApplicationVersionsClient. func (c *ClientFactory) NewGalleryApplicationVersionsClient() *GalleryApplicationVersionsClient { - subClient, _ := NewGalleryApplicationVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryApplicationVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryApplicationsClient creates a new instance of GalleryApplicationsClient. func (c *ClientFactory) NewGalleryApplicationsClient() *GalleryApplicationsClient { - subClient, _ := NewGalleryApplicationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryApplicationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryImageVersionsClient creates a new instance of GalleryImageVersionsClient. func (c *ClientFactory) NewGalleryImageVersionsClient() *GalleryImageVersionsClient { - subClient, _ := NewGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryImagesClient creates a new instance of GalleryImagesClient. func (c *ClientFactory) NewGalleryImagesClient() *GalleryImagesClient { - subClient, _ := NewGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGallerySharingProfileClient creates a new instance of GallerySharingProfileClient. func (c *ClientFactory) NewGallerySharingProfileClient() *GallerySharingProfileClient { - subClient, _ := NewGallerySharingProfileClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GallerySharingProfileClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewImagesClient creates a new instance of ImagesClient. func (c *ClientFactory) NewImagesClient() *ImagesClient { - subClient, _ := NewImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewLogAnalyticsClient creates a new instance of LogAnalyticsClient. func (c *ClientFactory) NewLogAnalyticsClient() *LogAnalyticsClient { - subClient, _ := NewLogAnalyticsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &LogAnalyticsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewOperationsClient creates a new instance of OperationsClient. func (c *ClientFactory) NewOperationsClient() *OperationsClient { - subClient, _ := NewOperationsClient(c.credential, c.options) - return subClient + return &OperationsClient{ + internal: c.internal, + } } // NewProximityPlacementGroupsClient creates a new instance of ProximityPlacementGroupsClient. 
func (c *ClientFactory) NewProximityPlacementGroupsClient() *ProximityPlacementGroupsClient { - subClient, _ := NewProximityPlacementGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ProximityPlacementGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewResourceSKUsClient creates a new instance of ResourceSKUsClient. func (c *ClientFactory) NewResourceSKUsClient() *ResourceSKUsClient { - subClient, _ := NewResourceSKUsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ResourceSKUsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewRestorePointCollectionsClient creates a new instance of RestorePointCollectionsClient. func (c *ClientFactory) NewRestorePointCollectionsClient() *RestorePointCollectionsClient { - subClient, _ := NewRestorePointCollectionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &RestorePointCollectionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewRestorePointsClient creates a new instance of RestorePointsClient. func (c *ClientFactory) NewRestorePointsClient() *RestorePointsClient { - subClient, _ := NewRestorePointsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &RestorePointsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSSHPublicKeysClient creates a new instance of SSHPublicKeysClient. func (c *ClientFactory) NewSSHPublicKeysClient() *SSHPublicKeysClient { - subClient, _ := NewSSHPublicKeysClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SSHPublicKeysClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleriesClient creates a new instance of SharedGalleriesClient. func (c *ClientFactory) NewSharedGalleriesClient() *SharedGalleriesClient { - subClient, _ := NewSharedGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleryImageVersionsClient creates a new instance of SharedGalleryImageVersionsClient. func (c *ClientFactory) NewSharedGalleryImageVersionsClient() *SharedGalleryImageVersionsClient { - subClient, _ := NewSharedGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleryImagesClient creates a new instance of SharedGalleryImagesClient. func (c *ClientFactory) NewSharedGalleryImagesClient() *SharedGalleryImagesClient { - subClient, _ := NewSharedGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSnapshotsClient creates a new instance of SnapshotsClient. func (c *ClientFactory) NewSnapshotsClient() *SnapshotsClient { - subClient, _ := NewSnapshotsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SnapshotsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewUsageClient creates a new instance of UsageClient. 
func (c *ClientFactory) NewUsageClient() *UsageClient { - subClient, _ := NewUsageClient(c.subscriptionID, c.credential, c.options) - return subClient + return &UsageClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineExtensionImagesClient creates a new instance of VirtualMachineExtensionImagesClient. func (c *ClientFactory) NewVirtualMachineExtensionImagesClient() *VirtualMachineExtensionImagesClient { - subClient, _ := NewVirtualMachineExtensionImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineExtensionImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineExtensionsClient creates a new instance of VirtualMachineExtensionsClient. func (c *ClientFactory) NewVirtualMachineExtensionsClient() *VirtualMachineExtensionsClient { - subClient, _ := NewVirtualMachineExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineImagesClient creates a new instance of VirtualMachineImagesClient. func (c *ClientFactory) NewVirtualMachineImagesClient() *VirtualMachineImagesClient { - subClient, _ := NewVirtualMachineImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineImagesEdgeZoneClient creates a new instance of VirtualMachineImagesEdgeZoneClient. func (c *ClientFactory) NewVirtualMachineImagesEdgeZoneClient() *VirtualMachineImagesEdgeZoneClient { - subClient, _ := NewVirtualMachineImagesEdgeZoneClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineImagesEdgeZoneClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineRunCommandsClient creates a new instance of VirtualMachineRunCommandsClient. func (c *ClientFactory) NewVirtualMachineRunCommandsClient() *VirtualMachineRunCommandsClient { - subClient, _ := NewVirtualMachineRunCommandsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineRunCommandsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetExtensionsClient creates a new instance of VirtualMachineScaleSetExtensionsClient. func (c *ClientFactory) NewVirtualMachineScaleSetExtensionsClient() *VirtualMachineScaleSetExtensionsClient { - subClient, _ := NewVirtualMachineScaleSetExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetRollingUpgradesClient creates a new instance of VirtualMachineScaleSetRollingUpgradesClient. func (c *ClientFactory) NewVirtualMachineScaleSetRollingUpgradesClient() *VirtualMachineScaleSetRollingUpgradesClient { - subClient, _ := NewVirtualMachineScaleSetRollingUpgradesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetRollingUpgradesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMExtensionsClient creates a new instance of VirtualMachineScaleSetVMExtensionsClient. 
func (c *ClientFactory) NewVirtualMachineScaleSetVMExtensionsClient() *VirtualMachineScaleSetVMExtensionsClient { - subClient, _ := NewVirtualMachineScaleSetVMExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMRunCommandsClient creates a new instance of VirtualMachineScaleSetVMRunCommandsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMRunCommandsClient() *VirtualMachineScaleSetVMRunCommandsClient { - subClient, _ := NewVirtualMachineScaleSetVMRunCommandsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMRunCommandsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMsClient creates a new instance of VirtualMachineScaleSetVMsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMsClient() *VirtualMachineScaleSetVMsClient { - subClient, _ := NewVirtualMachineScaleSetVMsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetsClient creates a new instance of VirtualMachineScaleSetsClient. func (c *ClientFactory) NewVirtualMachineScaleSetsClient() *VirtualMachineScaleSetsClient { - subClient, _ := NewVirtualMachineScaleSetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineSizesClient creates a new instance of VirtualMachineSizesClient. func (c *ClientFactory) NewVirtualMachineSizesClient() *VirtualMachineSizesClient { - subClient, _ := NewVirtualMachineSizesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineSizesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachinesClient creates a new instance of VirtualMachinesClient. 
func (c *ClientFactory) NewVirtualMachinesClient() *VirtualMachinesClient { - subClient, _ := NewVirtualMachinesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachinesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go index b845301668a6..1f7cdb7c382e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go @@ -180,10 +180,10 @@ func (client *CloudServiceRoleInstancesClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-09-04") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2022-09-04") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -380,10 +380,10 @@ func (client *CloudServiceRoleInstancesClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-09-04") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2022-09-04") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go index 2bfc798f355d..431a26cdef24 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleriesClient(subscriptionID string, credential azcore.TokenC // Get - Get a community gallery by gallery public name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleriesClientGetOptions contains the optional parameters for the CommunityGalleriesClient.Get method. 
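Context for the client_factory.go hunks above: before this update every New*Client accessor re-ran the full sub-client constructor and discarded its error; with the regenerated factory, NewClientFactory builds a single arm.Client once and every accessor returns a struct that shares that internal pipeline, so the accessors can no longer fail. Caller code is unaffected. A minimal usage sketch, not part of the diff; the subscription ID is a placeholder and azidentity is assumed for credentials:

    package main

    import (
        "context"
        "log"

        "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
        "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
    )

    func main() {
        cred, err := azidentity.NewDefaultAzureCredential(nil)
        if err != nil {
            log.Fatal(err)
        }
        // One factory; every sub-client obtained from it shares the same underlying arm.Client.
        factory, err := armcompute.NewClientFactory("<subscription-id>", cred, nil)
        if err != nil {
            log.Fatal(err)
        }
        vmClient := factory.NewVirtualMachinesClient()
        pager := vmClient.NewListAllPager(nil)
        for pager.More() {
            page, err := pager.NextPage(context.Background())
            if err != nil {
                log.Fatal(err)
            }
            for _, vm := range page.Value {
                log.Println(*vm.Name)
            }
        }
    }

The accessors keep their existing signatures, so consumers such as the cluster-autoscaler's Azure client wrappers pick up the shared-pipeline behaviour simply by vendoring the new module version.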
@@ -93,7 +93,7 @@ func (client *CommunityGalleriesClient) getCreateRequest(ctx context.Context, lo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go index 25b118f072ae..4ecc005caafe 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImagesClient(subscriptionID string, credential azcore.To // Get - Get a community gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -99,7 +99,7 @@ func (client *CommunityGalleryImagesClient) getCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -116,7 +116,7 @@ func (client *CommunityGalleryImagesClient) getHandleResponse(resp *http.Respons // NewListPager - List community gallery images inside a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleryImagesClientListOptions contains the optional parameters for the CommunityGalleryImagesClient.NewListPager @@ -164,7 +164,7 @@ func (client *CommunityGalleryImagesClient) listCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go index 5e382b36d1de..e98cfa506c7d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImageVersionsClient(subscriptionID string, credential az // Get - Get a community gallery image version. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -106,7 +106,7 @@ func (client *CommunityGalleryImageVersionsClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *CommunityGalleryImageVersionsClient) getHandleResponse(resp *http. // NewListPager - List community gallery image versions inside an image. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -176,7 +176,7 @@ func (client *CommunityGalleryImageVersionsClient) listCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go index d635d4f51f74..73f3548e459c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go @@ -10,7 +10,7 @@ package armcompute const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" - moduleVersion = "v5.4.0" + moduleVersion = "v5.6.0" ) type AccessLevel string @@ -543,6 +543,9 @@ const ( // DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey - Indicates Confidential VM disk with both OS disk and VM guest // state encrypted with a platform managed key DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithPlatformKey" + // DiskSecurityTypesConfidentialVMNonPersistedTPM - Indicates Confidential VM disk with a ephemeral vTPM. vTPM state is not + // persisted across VM reboots. 
+ DiskSecurityTypesConfidentialVMNonPersistedTPM DiskSecurityTypes = "ConfidentialVM_NonPersistedTPM" // DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey - Indicates Confidential VM disk with only VM guest // state encrypted DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey" @@ -556,6 +559,7 @@ func PossibleDiskSecurityTypesValues() []DiskSecurityTypes { return []DiskSecurityTypes{ DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey, DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey, + DiskSecurityTypesConfidentialVMNonPersistedTPM, DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey, DiskSecurityTypesTrustedLaunch, } @@ -1500,6 +1504,23 @@ func PossibleProtocolTypesValues() []ProtocolTypes { } } +// ProvisionedBandwidthCopyOption - If this field is set on a snapshot and createOption is CopyStart, the snapshot will be +// copied at a quicker speed. +type ProvisionedBandwidthCopyOption string + +const ( + ProvisionedBandwidthCopyOptionEnhanced ProvisionedBandwidthCopyOption = "Enhanced" + ProvisionedBandwidthCopyOptionNone ProvisionedBandwidthCopyOption = "None" +) + +// PossibleProvisionedBandwidthCopyOptionValues returns the possible values for the ProvisionedBandwidthCopyOption const type. +func PossibleProvisionedBandwidthCopyOptionValues() []ProvisionedBandwidthCopyOption { + return []ProvisionedBandwidthCopyOption{ + ProvisionedBandwidthCopyOptionEnhanced, + ProvisionedBandwidthCopyOptionNone, + } +} + // ProximityPlacementGroupType - Specifies the type of the proximity placement group. Possible values are: Standard : Co-locate // resources within an Azure region or Availability Zone. Ultra : For future use. type ProximityPlacementGroupType string diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go index b189a03400d2..0bea05f3e375 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go @@ -47,7 +47,7 @@ func NewDiskAccessesClient(subscriptionID string, credential azcore.TokenCredent // BeginCreateOrUpdate - Creates or updates a disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -75,7 +75,7 @@ func (client *DiskAccessesClient) BeginCreateOrUpdate(ctx context.Context, resou // CreateOrUpdate - Creates or updates a disk access resource // If the operation fails it returns an *azcore.ResponseError type. 
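Context for the constants.go hunks above: the 2023-10-02 surface adds two plain enum values, DiskSecurityTypesConfidentialVMNonPersistedTPM and the ProvisionedBandwidthCopyOption type; no method signatures change. A small sketch of how the new disk security type slots into the existing DiskSecurityProfile model, illustrative only; the SDK merely transports the value, and whether a given disk configuration may use it is validated service-side:

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
        "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
    )

    func main() {
        // Both additions are ordinary members of enum types generated in constants.go.
        fmt.Println(armcompute.PossibleDiskSecurityTypesValues())
        fmt.Println(armcompute.PossibleProvisionedBandwidthCopyOptionValues())

        // The new security type is set through the pre-existing DiskSecurityProfile field
        // on a disk's properties.
        profile := armcompute.DiskSecurityProfile{
            SecurityType: to.Ptr(armcompute.DiskSecurityTypesConfidentialVMNonPersistedTPM),
        }
        fmt.Println(*profile.SecurityType)
    }

The ProvisionedBandwidthCopyOption value is carried on a snapshot's CreationData when createOption is CopyStart, per the generated comment above; the exact model field that holds it is defined in this package's models.go and is not shown here.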
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess, options *DiskAccessesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskAccess); err != nil { @@ -129,7 +129,7 @@ func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Contex // BeginDelete - Deletes a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -156,7 +156,7 @@ func (client *DiskAccessesClient) BeginDelete(ctx context.Context, resourceGroup // Delete - Deletes a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) deleteOperation(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginDelete" @@ -198,7 +198,7 @@ func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -207,7 +207,7 @@ func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resou // BeginDeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -235,7 +235,7 @@ func (client *DiskAccessesClient) BeginDeleteAPrivateEndpointConnection(ctx cont // DeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) deleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginDeleteAPrivateEndpointConnection" @@ -281,7 +281,7 @@ func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -290,7 +290,7 @@ func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest( // Get - Gets information about a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -338,7 +338,7 @@ func (client *DiskAccessesClient) getCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *DiskAccessesClient) getHandleResponse(resp *http.Response) (DiskAc // GetAPrivateEndpointConnection - Gets information about a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -410,7 +410,7 @@ func (client *DiskAccessesClient) getAPrivateEndpointConnectionCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -428,7 +428,7 @@ func (client *DiskAccessesClient) getAPrivateEndpointConnectionHandleResponse(re // GetPrivateLinkResources - Gets the private link resources possible under disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The @@ -477,7 +477,7 @@ func (client *DiskAccessesClient) getPrivateLinkResourcesCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -494,7 +494,7 @@ func (client *DiskAccessesClient) getPrivateLinkResourcesHandleResponse(resp *ht // NewListPager - Lists all the disk access resources under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DiskAccessesClientListOptions contains the optional parameters for the DiskAccessesClient.NewListPager method. func (client *DiskAccessesClient) NewListPager(options *DiskAccessesClientListOptions) *runtime.Pager[DiskAccessesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[DiskAccessesClientListResponse]{ @@ -531,7 +531,7 @@ func (client *DiskAccessesClient) listCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -548,7 +548,7 @@ func (client *DiskAccessesClient) listHandleResponse(resp *http.Response) (DiskA // NewListByResourceGroupPager - Lists all the disk access resources under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DiskAccessesClientListByResourceGroupOptions contains the optional parameters for the DiskAccessesClient.NewListByResourceGroupPager // method. @@ -591,7 +591,7 @@ func (client *DiskAccessesClient) listByResourceGroupCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -608,7 +608,7 @@ func (client *DiskAccessesClient) listByResourceGroupHandleResponse(resp *http.R // NewListPrivateEndpointConnectionsPager - List information about private endpoint connections under a disk access resource // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -658,7 +658,7 @@ func (client *DiskAccessesClient) listPrivateEndpointConnectionsCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -676,7 +676,7 @@ func (client *DiskAccessesClient) listPrivateEndpointConnectionsHandleResponse(r // BeginUpdate - Updates (patches) a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. 
The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -704,7 +704,7 @@ func (client *DiskAccessesClient) BeginUpdate(ctx context.Context, resourceGroup // Update - Updates (patches) a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) update(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate, options *DiskAccessesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginUpdate" @@ -746,7 +746,7 @@ func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskAccess); err != nil { @@ -759,7 +759,7 @@ func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resou // can't be used to create a new private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -790,7 +790,7 @@ func (client *DiskAccessesClient) BeginUpdateAPrivateEndpointConnection(ctx cont // be used to create a new private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) updateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection, options *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginUpdateAPrivateEndpointConnection" @@ -836,7 +836,7 @@ func (client *DiskAccessesClient) updateAPrivateEndpointConnectionCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, privateEndpointConnection); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go index 9306f9c0d7c5..59d34d1e2d97 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go @@ -47,7 +47,7 @@ func NewDiskEncryptionSetsClient(subscriptionID string, credential azcore.TokenC // BeginCreateOrUpdate - Creates or updates a disk encryption set // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -75,7 +75,7 @@ func (client *DiskEncryptionSetsClient) BeginCreateOrUpdate(ctx context.Context, // CreateOrUpdate - Creates or updates a disk encryption set // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskEncryptionSet); err != nil { @@ -129,7 +129,7 @@ func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context. // BeginDelete - Deletes a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -156,7 +156,7 @@ func (client *DiskEncryptionSetsClient) BeginDelete(ctx context.Context, resourc // Delete - Deletes a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginDelete" @@ -198,7 +198,7 @@ func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -207,7 +207,7 @@ func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, // Get - Gets information about a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -255,7 +255,7 @@ func (client *DiskEncryptionSetsClient) getCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -272,7 +272,7 @@ func (client *DiskEncryptionSetsClient) getHandleResponse(resp *http.Response) ( // NewListPager - Lists all the disk encryption sets under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DiskEncryptionSetsClientListOptions contains the optional parameters for the DiskEncryptionSetsClient.NewListPager // method. func (client *DiskEncryptionSetsClient) NewListPager(options *DiskEncryptionSetsClientListOptions) *runtime.Pager[DiskEncryptionSetsClientListResponse] { @@ -310,7 +310,7 @@ func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, o return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -327,7 +327,7 @@ func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) // NewListAssociatedResourcesPager - Lists all resources that are encrypted with this disk encryption set. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. 
// - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -377,7 +377,7 @@ func (client *DiskEncryptionSetsClient) listAssociatedResourcesCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -394,7 +394,7 @@ func (client *DiskEncryptionSetsClient) listAssociatedResourcesHandleResponse(re // NewListByResourceGroupPager - Lists all the disk encryption sets under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DiskEncryptionSetsClientListByResourceGroupOptions contains the optional parameters for the DiskEncryptionSetsClient.NewListByResourceGroupPager // method. @@ -437,7 +437,7 @@ func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -455,7 +455,7 @@ func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp * // BeginUpdate - Updates (patches) a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -483,7 +483,7 @@ func (client *DiskEncryptionSetsClient) BeginUpdate(ctx context.Context, resourc // Update - Updates (patches) a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) update(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginUpdate" @@ -525,7 +525,7 @@ func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskEncryptionSet); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go index 54b5769575eb..43307ca04936 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go @@ -47,7 +47,7 @@ func NewDiskRestorePointClient(subscriptionID string, credential azcore.TokenCre // Get - Get disk restorePoint resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -103,7 +103,7 @@ func (client *DiskRestorePointClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -121,7 +121,7 @@ func (client *DiskRestorePointClient) getHandleResponse(resp *http.Response) (Di // BeginGrantAccess - Grants access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -150,7 +150,7 @@ func (client *DiskRestorePointClient) BeginGrantAccess(ctx context.Context, reso // GrantAccess - Grants access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskRestorePointClient) grantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData, options *DiskRestorePointClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "DiskRestorePointClient.BeginGrantAccess" @@ -200,7 +200,7 @@ func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -211,7 +211,7 @@ func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Conte // NewListByRestorePointPager - Lists diskRestorePoints under a vmRestorePoint. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -264,7 +264,7 @@ func (client *DiskRestorePointClient) listByRestorePointCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -282,7 +282,7 @@ func (client *DiskRestorePointClient) listByRestorePointHandleResponse(resp *htt // BeginRevokeAccess - Revokes access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -310,7 +310,7 @@ func (client *DiskRestorePointClient) BeginRevokeAccess(ctx context.Context, res // RevokeAccess - Revokes access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskRestorePointClient) revokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "DiskRestorePointClient.BeginRevokeAccess" @@ -360,7 +360,7 @@ func (client *DiskRestorePointClient) revokeAccessCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go index 854a8ffd68bc..a0ffa752bc99 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go @@ -47,7 +47,7 @@ func NewDisksClient(subscriptionID string, credential azcore.TokenCredential, op // BeginCreateOrUpdate - Creates or updates a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -75,7 +75,7 @@ func (client *DisksClient) BeginCreateOrUpdate(ctx context.Context, resourceGrou // CreateOrUpdate - Creates or updates a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk, options *DisksClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, disk); err != nil { @@ -129,7 +129,7 @@ func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, reso // BeginDelete - Deletes a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. 
The maximum name length is 80 @@ -155,7 +155,7 @@ func (client *DisksClient) BeginDelete(ctx context.Context, resourceGroupName st // Delete - Deletes a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) deleteOperation(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginDelete" @@ -197,7 +197,7 @@ func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -205,7 +205,7 @@ func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGrou // Get - Gets information about a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -253,7 +253,7 @@ func (client *DisksClient) getCreateRequest(ctx context.Context, resourceGroupNa return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -271,7 +271,7 @@ func (client *DisksClient) getHandleResponse(resp *http.Response) (DisksClientGe // BeginGrantAccess - Grants access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -299,7 +299,7 @@ func (client *DisksClient) BeginGrantAccess(ctx context.Context, resourceGroupNa // GrantAccess - Grants access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) grantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData, options *DisksClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginGrantAccess" @@ -341,7 +341,7 @@ func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -352,7 +352,7 @@ func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourc // NewListPager - Lists all the disks under a subscription. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DisksClientListOptions contains the optional parameters for the DisksClient.NewListPager method. func (client *DisksClient) NewListPager(options *DisksClientListOptions) *runtime.Pager[DisksClientListResponse] { return runtime.NewPager(runtime.PagingHandler[DisksClientListResponse]{ @@ -389,7 +389,7 @@ func (client *DisksClient) listCreateRequest(ctx context.Context, options *Disks return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -406,7 +406,7 @@ func (client *DisksClient) listHandleResponse(resp *http.Response) (DisksClientL // NewListByResourceGroupPager - Lists all the disks under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DisksClientListByResourceGroupOptions contains the optional parameters for the DisksClient.NewListByResourceGroupPager // method. @@ -449,7 +449,7 @@ func (client *DisksClient) listByResourceGroupCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -467,7 +467,7 @@ func (client *DisksClient) listByResourceGroupHandleResponse(resp *http.Response // BeginRevokeAccess - Revokes access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -494,7 +494,7 @@ func (client *DisksClient) BeginRevokeAccess(ctx context.Context, resourceGroupN // RevokeAccess - Revokes access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) revokeAccess(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginRevokeAccess" @@ -536,7 +536,7 @@ func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -544,7 +544,7 @@ func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resour // BeginUpdate - Updates (patches) a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. 
The maximum name length is 80 @@ -571,7 +571,7 @@ func (client *DisksClient) BeginUpdate(ctx context.Context, resourceGroupName st // Update - Updates (patches) a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate, options *DisksClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginUpdate" @@ -613,7 +613,7 @@ func (client *DisksClient) updateCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, disk); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go index 8fac494c7878..f24e730a63f5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go @@ -47,7 +47,7 @@ func NewGalleriesClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -74,7 +74,7 @@ func (client *GalleriesClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginCreateOrUpdate" @@ -116,7 +116,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { @@ -128,7 +128,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery to be deleted. 
// - options - GalleriesClientBeginDeleteOptions contains the optional parameters for the GalleriesClient.BeginDelete method. @@ -152,7 +152,7 @@ func (client *GalleriesClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginDelete" @@ -194,7 +194,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -203,7 +203,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource // Get - Retrieves information about a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - options - GalleriesClientGetOptions contains the optional parameters for the GalleriesClient.Get method. @@ -249,13 +249,13 @@ func (client *GalleriesClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") - if options != nil && options.Select != nil { - reqQP.Set("$select", string(*options.Select)) - } if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + if options != nil && options.Select != nil { + reqQP.Set("$select", string(*options.Select)) + } + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -272,7 +272,7 @@ func (client *GalleriesClient) getHandleResponse(resp *http.Response) (Galleries // NewListPager - List galleries under a subscription. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - options - GalleriesClientListOptions contains the optional parameters for the GalleriesClient.NewListPager method. func (client *GalleriesClient) NewListPager(options *GalleriesClientListOptions) *runtime.Pager[GalleriesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[GalleriesClientListResponse]{ @@ -309,7 +309,7 @@ func (client *GalleriesClient) listCreateRequest(ctx context.Context, options *G return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -326,7 +326,7 @@ func (client *GalleriesClient) listHandleResponse(resp *http.Response) (Gallerie // NewListByResourceGroupPager - List galleries under a resource group. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. 
// - options - GalleriesClientListByResourceGroupOptions contains the optional parameters for the GalleriesClient.NewListByResourceGroupPager // method. @@ -369,7 +369,7 @@ func (client *GalleriesClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -387,7 +387,7 @@ func (client *GalleriesClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginUpdate - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -413,7 +413,7 @@ func (client *GalleriesClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginUpdate" @@ -455,7 +455,7 @@ func (client *GalleriesClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go index 1ebec623b454..cd5d6f3ecfe6 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationsClient(subscriptionID string, credential azcore.Token // BeginCreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be created. // - galleryApplicationName - The name of the gallery Application Definition to be created or updated. The allowed characters @@ -76,7 +76,7 @@ func (client *GalleryApplicationsClient) BeginCreateOrUpdate(ctx context.Context // CreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context // BeginDelete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be deleted. // - galleryApplicationName - The name of the gallery Application Definition to be deleted. @@ -160,7 +160,7 @@ func (client *GalleryApplicationsClient) BeginDelete(ctx context.Context, resour // Delete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which the Application Definitions are to be retrieved. // - galleryApplicationName - The name of the gallery Application Definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryApplicationsClient) getCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryApplicationsClient) getHandleResponse(resp *http.Response) // NewListByGalleryPager - List gallery Application Definitions in a gallery. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which Application Definitions are to be listed. // - options - GalleryApplicationsClientListByGalleryOptions contains the optional parameters for the GalleryApplicationsClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryApplicationsClient) listByGalleryCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryApplicationsClient) listByGalleryHandleResponse(resp *http. // BeginUpdate - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be updated. // - galleryApplicationName - The name of the gallery Application Definition to be updated. The allowed characters are alphabets @@ -378,7 +378,7 @@ func (client *GalleryApplicationsClient) BeginUpdate(ctx context.Context, resour // Update - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginUpdate" @@ -424,7 +424,7 @@ func (client *GalleryApplicationsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go index 3d885cb93e0c..06571af3d467 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationVersionsClient(subscriptionID string, credential azcor // BeginCreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. 
// - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryApplicationVersionsClient) BeginCreateOrUpdate(ctx context. // CreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx // BeginDelete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. @@ -166,7 +166,7 @@ func (client *GalleryApplicationVersionsClient) BeginDelete(ctx context.Context, // Delete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. // Get - Retrieves information about a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. 
// - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. @@ -285,7 +285,7 @@ func (client *GalleryApplicationVersionsClient) getCreateRequest(ctx context.Con if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryApplicationVersionsClient) getHandleResponse(resp *http.Res // NewListByGalleryApplicationPager - List gallery Application Versions in a gallery Application Definition. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the Shared Application Gallery Application Definition from which the Application Versions @@ -356,7 +356,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationHandleRe // BeginUpdate - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be updated. @@ -404,7 +404,7 @@ func (client *GalleryApplicationVersionsClient) BeginUpdate(ctx context.Context, // Update - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginUpdate" @@ -454,7 +454,7 @@ func (client *GalleryApplicationVersionsClient) updateCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go index 6a684b3ed613..8be1a5784a01 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go @@ -47,7 +47,7 @@ func NewGalleryImagesClient(subscriptionID string, credential azcore.TokenCreden // BeginCreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be created. // - galleryImageName - The name of the gallery image definition to be created or updated. The allowed characters are alphabets @@ -76,7 +76,7 @@ func (client *GalleryImagesClient) BeginCreateOrUpdate(ctx context.Context, reso // CreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte // BeginDelete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be deleted. // - galleryImageName - The name of the gallery image definition to be deleted. @@ -160,7 +160,7 @@ func (client *GalleryImagesClient) BeginDelete(ctx context.Context, resourceGrou // Delete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso // Get - Retrieves information about a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which the Image Definitions are to be retrieved. // - galleryImageName - The name of the gallery image definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryImagesClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryImagesClient) getHandleResponse(resp *http.Response) (Galle // NewListByGalleryPager - List gallery image definitions in a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which Image Definitions are to be listed. // - options - GalleryImagesClientListByGalleryOptions contains the optional parameters for the GalleryImagesClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryImagesClient) listByGalleryCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Respon // BeginUpdate - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be updated. // - galleryImageName - The name of the gallery image definition to be updated. The allowed characters are alphabets and numbers @@ -377,7 +377,7 @@ func (client *GalleryImagesClient) BeginUpdate(ctx context.Context, resourceGrou // Update - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginUpdate" @@ -423,7 +423,7 @@ func (client *GalleryImagesClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go index 05f3cf3526be..b16e42ed74a0 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go @@ -47,7 +47,7 @@ func NewGalleryImageVersionsClient(subscriptionID string, credential azcore.Toke // BeginCreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryImageVersionsClient) BeginCreateOrUpdate(ctx context.Contex // CreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex // BeginDelete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. 
// - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -166,7 +166,7 @@ func (client *GalleryImageVersionsClient) BeginDelete(ctx context.Context, resou // Delete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex // Get - Retrieves information about a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -285,7 +285,7 @@ func (client *GalleryImageVersionsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryImageVersionsClient) getHandleResponse(resp *http.Response) // NewListByGalleryImagePager - List gallery image versions in a gallery image definition. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the Shared Image Gallery Image Definition from which the Image Versions are to be listed. @@ -355,7 +355,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -373,7 +373,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageHandleResponse(resp // BeginUpdate - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. 
// - galleryImageName - The name of the gallery image definition in which the Image Version is to be updated. @@ -403,7 +403,7 @@ func (client *GalleryImageVersionsClient) BeginUpdate(ctx context.Context, resou // Update - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginUpdate" @@ -453,7 +453,7 @@ func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go index 28d024e9727e..0ccc5ce9fd60 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go @@ -47,7 +47,7 @@ func NewGallerySharingProfileClient(subscriptionID string, credential azcore.Tok // BeginUpdate - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - sharingUpdate - Parameters supplied to the update gallery sharing profile. @@ -73,7 +73,7 @@ func (client *GallerySharingProfileClient) BeginUpdate(ctx context.Context, reso // Update - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. 
 //
-// Generated from API version 2022-08-03
+// Generated from API version 2023-07-03
 func (client *GallerySharingProfileClient) update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*http.Response, error) {
 	var err error
 	const operationName = "GallerySharingProfileClient.BeginUpdate"
@@ -115,7 +115,7 @@ func (client *GallerySharingProfileClient) updateCreateRequest(ctx context.Conte
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2022-08-03")
+	reqQP.Set("api-version", "2023-07-03")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	if err := runtime.MarshalAsJSON(req, sharingUpdate); err != nil {
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go
index 5b5a3197bab1..6f755a27ee04 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go
@@ -1037,6 +1037,9 @@ type CreationData struct {
 	// disabled after enabled.
 	PerformancePlus *bool
 
+	// If this field is set on a snapshot and createOption is CopyStart, the snapshot will be copied at a quicker speed.
+	ProvisionedBandwidthCopySpeed *ProvisionedBandwidthCopyOption
+
 	// If createOption is ImportSecure, this is the URI of a blob to be imported into VM guest state.
 	SecurityDataURI *string
 
@@ -2382,13 +2385,17 @@ type GalleryArtifactVersionFullSource struct {
 	// The resource Id of the source Community Gallery Image. Only required when using Community Gallery Image as a source.
 	CommunityGalleryImageID *string
 
-	// The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
+	// The id of the gallery artifact version source.
 	ID *string
+
+	// The resource Id of the source virtual machine. Only required when capturing a virtual machine to source this Gallery Image
+	// Version.
+	VirtualMachineID *string
 }
 
 // GalleryArtifactVersionSource - The gallery artifact version source.
 type GalleryArtifactVersionSource struct {
-	// The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
+	// The id of the gallery artifact version source.
 	ID *string
 }
 
@@ -2423,7 +2430,7 @@ type GalleryDiskImage struct {
 
 // GalleryDiskImageSource - The source for the disk image.
 type GalleryDiskImageSource struct {
-	// The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
+	// The id of the gallery artifact version source.
 	ID *string
 
 	// The Storage Account Id that contains the vhd blob being used as a source for this artifact version.
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go
index 3c63478d02f0..16bc25e59cdd 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go
@@ -2404,6 +2404,7 @@ func (c CreationData) MarshalJSON() ([]byte, error) {
 	populate(objectMap, "imageReference", c.ImageReference)
 	populate(objectMap, "logicalSectorSize", c.LogicalSectorSize)
 	populate(objectMap, "performancePlus", c.PerformancePlus)
+	populate(objectMap, "provisionedBandwidthCopySpeed", c.ProvisionedBandwidthCopySpeed)
 	populate(objectMap, "securityDataUri", c.SecurityDataURI)
 	populate(objectMap, "sourceResourceId", c.SourceResourceID)
 	populate(objectMap, "sourceUri", c.SourceURI)
@@ -2440,6 +2441,9 @@ func (c *CreationData) UnmarshalJSON(data []byte) error {
 		case "performancePlus":
 			err = unpopulate(val, "PerformancePlus", &c.PerformancePlus)
 			delete(rawMsg, key)
+		case "provisionedBandwidthCopySpeed":
+			err = unpopulate(val, "ProvisionedBandwidthCopySpeed", &c.ProvisionedBandwidthCopySpeed)
+			delete(rawMsg, key)
 		case "securityDataUri":
 			err = unpopulate(val, "SecurityDataURI", &c.SecurityDataURI)
 			delete(rawMsg, key)
@@ -5488,6 +5492,7 @@ func (g GalleryArtifactVersionFullSource) MarshalJSON() ([]byte, error) {
 	objectMap := make(map[string]any)
 	populate(objectMap, "communityGalleryImageId", g.CommunityGalleryImageID)
 	populate(objectMap, "id", g.ID)
+	populate(objectMap, "virtualMachineId", g.VirtualMachineID)
 	return json.Marshal(objectMap)
 }
 
@@ -5506,6 +5511,9 @@ func (g *GalleryArtifactVersionFullSource) UnmarshalJSON(data []byte) error {
 		case "id":
 			err = unpopulate(val, "ID", &g.ID)
 			delete(rawMsg, key)
+		case "virtualMachineId":
+			err = unpopulate(val, "VirtualMachineID", &g.VirtualMachineID)
+			delete(rawMsg, key)
 		}
 		if err != nil {
 			return fmt.Errorf("unmarshalling type %T: %v", g, err)
@@ -19765,7 +19773,7 @@ func populateAny(m map[string]any, k string, v any) {
 }
 
 func unpopulate(data json.RawMessage, fn string, v any) error {
-	if data == nil {
+	if data == nil || string(data) == "null" {
 		return nil
 	}
 	if err := json.Unmarshal(data, v); err != nil {
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go
index 947d8373647c..c35ed6b0ca00 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go
@@ -218,10 +218,10 @@ func (client *ProximityPlacementGroupsClient) getCreateRequest(ctx context.Conte
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2023-09-01")
 	if options != nil && options.IncludeColocationStatus != nil {
 		reqQP.Set("includeColocationStatus", *options.IncludeColocationStatus)
 	}
-	reqQP.Set("api-version", "2023-09-01")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go
index e0dd1afa4359..f716c1ccf077 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go
@@ -83,10 +83,10 @@ func (client *ResourceSKUsClient) listCreateRequest(ctx context.Context, options
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2021-07-01")
 	if options != nil && options.Filter != nil {
 		reqQP.Set("$filter", *options.Filter)
 	}
+	reqQP.Set("api-version", "2021-07-01")
 	if options != nil && options.IncludeExtendedLocations != nil {
 		reqQP.Set("includeExtendedLocations", *options.IncludeExtendedLocations)
 	}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/responses.go
similarity index 100%
rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go
rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/responses.go
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go
index 4bf2d09608d5..ee8172bae84c 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go
@@ -47,7 +47,7 @@ func NewSharedGalleriesClient(subscriptionID string, credential azcore.TokenCred
 // Get - Get a shared gallery by subscription id or tenant id.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2022-08-03
+// Generated from API version 2023-07-03
 // - location - Resource location.
 // - galleryUniqueName - The unique name of the Shared Gallery.
 // - options - SharedGalleriesClientGetOptions contains the optional parameters for the SharedGalleriesClient.Get method.
@@ -93,7 +93,7 @@ func (client *SharedGalleriesClient) getCreateRequest(ctx context.Context, locat
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("api-version", "2022-08-03")
+	reqQP.Set("api-version", "2023-07-03")
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	req.Raw().Header["Accept"] = []string{"application/json"}
 	return req, nil
@@ -110,7 +110,7 @@ func (client *SharedGalleriesClient) getHandleResponse(resp *http.Response) (Sha
 
 // NewListPager - List shared galleries by subscription id or tenant id.
 //
-// Generated from API version 2022-08-03
+// Generated from API version 2023-07-03
 // - location - Resource location.
// - options - SharedGalleriesClientListOptions contains the optional parameters for the SharedGalleriesClient.NewListPager // method. @@ -153,7 +153,7 @@ func (client *SharedGalleriesClient) listCreateRequest(ctx context.Context, loca return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go index ad549b1406e1..8f0ef79cfe0e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImagesClient(subscriptionID string, credential azcore.Token // Get - Get a shared gallery image by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -98,7 +98,7 @@ func (client *SharedGalleryImagesClient) getCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -115,7 +115,7 @@ func (client *SharedGalleryImagesClient) getHandleResponse(resp *http.Response) // NewListPager - List shared gallery images by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. 
// - options - SharedGalleryImagesClientListOptions contains the optional parameters for the SharedGalleryImagesClient.NewListPager @@ -163,7 +163,7 @@ func (client *SharedGalleryImagesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go index 5c64dc844214..729b0e3c5d54 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImageVersionsClient(subscriptionID string, credential azcor // Get - Get a shared gallery image version by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -106,7 +106,7 @@ func (client *SharedGalleryImageVersionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *SharedGalleryImageVersionsClient) getHandleResponse(resp *http.Res // NewListPager - List shared gallery image versions by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. 
@@ -176,7 +176,7 @@ func (client *SharedGalleryImageVersionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go index a0d26361362d..8c73f7709e6c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go @@ -47,7 +47,7 @@ func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -75,7 +75,7 @@ func (client *SnapshotsClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) createOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot, options *SnapshotsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, snapshot); err != nil { @@ -129,7 +129,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -155,7 +155,7 @@ func (client *SnapshotsClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) deleteOperation(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginDelete" @@ -197,7 +197,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -205,7 +205,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource // Get - Gets information about a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -253,7 +253,7 @@ func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -271,7 +271,7 @@ func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (Snapshots // BeginGrantAccess - Grants access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -300,7 +300,7 @@ func (client *SnapshotsClient) BeginGrantAccess(ctx context.Context, resourceGro // GrantAccess - Grants access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) grantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, options *SnapshotsClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginGrantAccess" @@ -342,7 +342,7 @@ func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -353,7 +353,7 @@ func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, res // NewListPager - Lists snapshots under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.NewListPager method. 
func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ @@ -390,7 +390,7 @@ func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *S return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -407,7 +407,7 @@ func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (Snapshot // NewListByResourceGroupPager - Lists snapshots under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.NewListByResourceGroupPager // method. @@ -450,7 +450,7 @@ func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -468,7 +468,7 @@ func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginRevokeAccess - Revokes access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -496,7 +496,7 @@ func (client *SnapshotsClient) BeginRevokeAccess(ctx context.Context, resourceGr // RevokeAccess - Revokes access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) revokeAccess(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginRevokeAccess" @@ -538,7 +538,7 @@ func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -546,7 +546,7 @@ func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, re // BeginUpdate - Updates (patches) a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -573,7 +573,7 @@ func (client *SnapshotsClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Updates (patches) a snapshot. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, options *SnapshotsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginUpdate" @@ -615,7 +615,7 @@ func (client *SnapshotsClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, snapshot); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go index 4f75ccd6f1d7..ae4e62dd4271 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go @@ -19,12 +19,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -40,17 +44,33 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -61,6 +81,10 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { return err } +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return @@ -74,7 +98,7 @@ func populateDateTimeRFC3339(m 
map[string]any, k string, t *time.Time) { } func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateTimeRFC3339 diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go index a8e90c4b13d2..e8ff65ad0796 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go @@ -237,12 +237,12 @@ func (client *VirtualMachineExtensionImagesClient) listVersionsCreateRequest(ctx if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go index 1c5ab27e5ea1..2e0e80d8b858 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go @@ -188,12 +188,12 @@ func (client *VirtualMachineImagesClient) listCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go index 4151b10d3c8e..756fe975e576 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go @@ -198,12 +198,12 @@ func (client 
*VirtualMachineImagesEdgeZoneClient) listCreateRequest(ctx context. if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go index b665b49f529b..a816e9a9db54 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go @@ -439,13 +439,13 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } @@ -524,10 +524,10 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -603,10 +603,10 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -930,12 +930,12 @@ func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - if options != nil && options.Filter != nil { - reqQP.Set("$filter", *options.Filter) - } if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -992,15 +992,15 @@ func (client *VirtualMachinesClient) listAllCreateRequest(ctx context.Context, o return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") - if 
options != nil && options.StatusOnly != nil { - reqQP.Set("statusOnly", *options.StatusOnly) + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) } if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - if options != nil && options.Expand != nil { - reqQP.Set("$expand", string(*options.Expand)) + reqQP.Set("api-version", "2023-09-01") + if options != nil && options.StatusOnly != nil { + reqQP.Set("statusOnly", *options.StatusOnly) } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -1290,10 +1290,10 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1667,10 +1667,10 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1970,13 +1970,13 @@ func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, re reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go index eb6506c1599d..81f13d5049f8 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go @@ -261,13 +261,13 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } @@ -348,10 +348,10 
@@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -433,10 +433,10 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -513,10 +513,10 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil { @@ -578,13 +578,13 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") + if options != nil && options.PlacementGroupID != nil { + reqQP.Set("placementGroupId", *options.PlacementGroupID) + } reqQP.Set("platformUpdateDomain", strconv.FormatInt(int64(platformUpdateDomain), 10)) if options != nil && options.Zone != nil { reqQP.Set("zone", *options.Zone) } - if options != nil && options.PlacementGroupID != nil { - reqQP.Set("placementGroupId", *options.PlacementGroupID) - } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -649,10 +649,10 @@ func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1200,10 +1200,10 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1863,13 +1863,13 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { 
req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go index e1da5fc116e4..0bd6fc83aee5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go @@ -372,10 +372,10 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -574,15 +574,15 @@ func (client *VirtualMachineScaleSetVMsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } if options != nil && options.Select != nil { reqQP.Set("$select", *options.Select) } - if options != nil && options.Expand != nil { - reqQP.Set("$expand", *options.Expand) - } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -758,10 +758,10 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1157,10 +1157,10 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataCreate return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1480,13 +1480,13 @@ func (client *VirtualMachineScaleSetVMsClient) updateCreateRequest(ctx context.C reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := 
runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err }
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md
index 8861dcfc4d1d..4db53e72116b 100644
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md
@@ -1,5 +1,118 @@
 # Release History
+## 4.9.0-beta.1 (2024-03-22)
+### Features Added
+
+- New value `AgentPoolTypeVirtualMachines` added to enum type `AgentPoolType`
+- New value `NetworkPolicyNone` added to enum type `NetworkPolicy`
+- New value `NodeOSUpgradeChannelSecurityPatch` added to enum type `NodeOSUpgradeChannel`
+- New value `OSSKUMariner`, `OSSKUWindowsAnnual` added to enum type `OSSKU`
+- New value `PublicNetworkAccessSecuredByPerimeter` added to enum type `PublicNetworkAccess`
+- New value `SnapshotTypeManagedCluster` added to enum type `SnapshotType`
+- New value `WorkloadRuntimeKataMshvVMIsolation` added to enum type `WorkloadRuntime`
+- New enum type `AddonAutoscaling` with values `AddonAutoscalingDisabled`, `AddonAutoscalingEnabled`
+- New enum type `AgentPoolSSHAccess` with values `AgentPoolSSHAccessDisabled`, `AgentPoolSSHAccessLocalUser`
+- New enum type `ArtifactSource` with values `ArtifactSourceCache`, `ArtifactSourceDirect`
+- New enum type `GuardrailsSupport` with values `GuardrailsSupportPreview`, `GuardrailsSupportStable`
+- New enum type `IpvsScheduler` with values `IpvsSchedulerLeastConnection`, `IpvsSchedulerRoundRobin`
+- New enum type `Level` with values `LevelEnforcement`, `LevelOff`, `LevelWarning`
+- New enum type `Mode` with values `ModeIPTABLES`, `ModeIPVS`
+- New enum type `NodeProvisioningMode` with values `NodeProvisioningModeAuto`, `NodeProvisioningModeManual`
+- New enum type `PodIPAllocationMode` with values `PodIPAllocationModeDynamicIndividual`, `PodIPAllocationModeStaticBlock`
+- New enum type `RestrictionLevel` with values `RestrictionLevelReadOnly`, `RestrictionLevelUnrestricted`
+- New enum type `SafeguardsSupport` with values `SafeguardsSupportPreview`, `SafeguardsSupportStable`
+- New function `*AgentPoolsClient.BeginDeleteMachines(context.Context, string, string, string, AgentPoolDeleteMachinesParameter, *AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[AgentPoolsClientDeleteMachinesResponse], error)`
+- New function `*ClientFactory.NewMachinesClient() *MachinesClient`
+- New function `*ClientFactory.NewManagedClusterSnapshotsClient() *ManagedClusterSnapshotsClient`
+- New function `*ClientFactory.NewOperationStatusResultClient() *OperationStatusResultClient`
+- New function `NewMachinesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*MachinesClient, error)`
+- New function `*MachinesClient.Get(context.Context, string, string, string, string, *MachinesClientGetOptions) (MachinesClientGetResponse, error)`
+- New function `*MachinesClient.NewListPager(string, string, string, *MachinesClientListOptions) *runtime.Pager[MachinesClientListResponse]`
+- New function `NewManagedClusterSnapshotsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ManagedClusterSnapshotsClient, error)`
+- New function `*ManagedClusterSnapshotsClient.CreateOrUpdate(context.Context, string, string, ManagedClusterSnapshot, *ManagedClusterSnapshotsClientCreateOrUpdateOptions) (ManagedClusterSnapshotsClientCreateOrUpdateResponse, error)`
+- New function `*ManagedClusterSnapshotsClient.Delete(context.Context, string, string, *ManagedClusterSnapshotsClientDeleteOptions) (ManagedClusterSnapshotsClientDeleteResponse, error)`
+- New function `*ManagedClusterSnapshotsClient.Get(context.Context, string, string, *ManagedClusterSnapshotsClientGetOptions) (ManagedClusterSnapshotsClientGetResponse, error)`
+- New function `*ManagedClusterSnapshotsClient.NewListByResourceGroupPager(string, *ManagedClusterSnapshotsClientListByResourceGroupOptions) *runtime.Pager[ManagedClusterSnapshotsClientListByResourceGroupResponse]`
+- New function `*ManagedClusterSnapshotsClient.NewListPager(*ManagedClusterSnapshotsClientListOptions) *runtime.Pager[ManagedClusterSnapshotsClientListResponse]`
+- New function `*ManagedClusterSnapshotsClient.UpdateTags(context.Context, string, string, TagsObject, *ManagedClusterSnapshotsClientUpdateTagsOptions) (ManagedClusterSnapshotsClientUpdateTagsResponse, error)`
+- New function `*ManagedClustersClient.GetGuardrailsVersions(context.Context, string, string, *ManagedClustersClientGetGuardrailsVersionsOptions) (ManagedClustersClientGetGuardrailsVersionsResponse, error)`
+- New function `*ManagedClustersClient.GetSafeguardsVersions(context.Context, string, string, *ManagedClustersClientGetSafeguardsVersionsOptions) (ManagedClustersClientGetSafeguardsVersionsResponse, error)`
+- New function `*ManagedClustersClient.NewListGuardrailsVersionsPager(string, *ManagedClustersClientListGuardrailsVersionsOptions) *runtime.Pager[ManagedClustersClientListGuardrailsVersionsResponse]`
+- New function `*ManagedClustersClient.NewListSafeguardsVersionsPager(string, *ManagedClustersClientListSafeguardsVersionsOptions) *runtime.Pager[ManagedClustersClientListSafeguardsVersionsResponse]`
+- New function `NewOperationStatusResultClient(string, azcore.TokenCredential, *arm.ClientOptions) (*OperationStatusResultClient, error)`
+- New function `*OperationStatusResultClient.Get(context.Context, string, string, string, *OperationStatusResultClientGetOptions) (OperationStatusResultClientGetResponse, error)`
+- New function `*OperationStatusResultClient.GetByAgentPool(context.Context, string, string, string, string, *OperationStatusResultClientGetByAgentPoolOptions) (OperationStatusResultClientGetByAgentPoolResponse, error)`
+- New function `*OperationStatusResultClient.NewListPager(string, string, *OperationStatusResultClientListOptions) *runtime.Pager[OperationStatusResultClientListResponse]`
+- New struct `AgentPoolArtifactStreamingProfile`
+- New struct `AgentPoolDeleteMachinesParameter`
+- New struct `AgentPoolGPUProfile`
+- New struct `AgentPoolSecurityProfile`
+- New struct `AgentPoolWindowsProfile`
+- New struct `ErrorAdditionalInfo`
+- New struct `ErrorDetail`
+- New struct `GuardrailsAvailableVersion`
+- New struct `GuardrailsAvailableVersionsList`
+- New struct `GuardrailsAvailableVersionsProperties`
+- New struct `Machine`
+- New struct `MachineIPAddress`
+- New struct `MachineListResult`
+- New struct `MachineNetworkProperties`
+- New struct `MachineProperties`
+- New struct `ManagedClusterAIToolchainOperatorProfile`
+- New struct `ManagedClusterAzureMonitorProfileAppMonitoring`
+- New struct `ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics`
+- New struct `ManagedClusterAzureMonitorProfileContainerInsights`
+- New struct `ManagedClusterAzureMonitorProfileLogs`
+- New struct `ManagedClusterAzureMonitorProfileWindowsHostLogs`
+- New struct `ManagedClusterBootstrapProfile`
+- New struct `ManagedClusterCostAnalysis`
+- New struct `ManagedClusterMetricsProfile`
+- New struct `ManagedClusterNodeProvisioningProfile`
+- New struct `ManagedClusterNodeResourceGroupProfile`
+- New struct `ManagedClusterPropertiesForSnapshot`
+- New struct `ManagedClusterSecurityProfileImageIntegrity`
+- New struct `ManagedClusterSecurityProfileNodeRestriction`
+- New struct `ManagedClusterSnapshot`
+- New struct `ManagedClusterSnapshotListResult`
+- New struct `ManagedClusterSnapshotProperties`
+- New struct `ManualScaleProfile`
+- New struct `NetworkMonitoring`
+- New struct `NetworkProfileForSnapshot`
+- New struct `NetworkProfileKubeProxyConfig`
+- New struct `NetworkProfileKubeProxyConfigIpvsConfig`
+- New struct `OperationStatusResult`
+- New struct `OperationStatusResultList`
+- New struct `SafeguardsAvailableVersion`
+- New struct `SafeguardsAvailableVersionsList`
+- New struct `SafeguardsAvailableVersionsProperties`
+- New struct `SafeguardsProfile`
+- New struct `ScaleProfile`
+- New struct `VirtualMachineNodes`
+- New struct `VirtualMachinesProfile`
+- New field `IgnorePodDisruptionBudget` in struct `AgentPoolsClientBeginDeleteOptions`
+- New field `EnableVnetIntegration`, `SubnetID` in struct `ManagedClusterAPIServerAccessProfile`
+- New field `ArtifactStreamingProfile`, `EnableCustomCATrust`, `GpuProfile`, `MessageOfTheDay`, `NodeInitializationTaints`, `PodIPAllocationMode`, `SecurityProfile`, `VirtualMachineNodesStatus`, `VirtualMachinesProfile`, `WindowsProfile` in struct `ManagedClusterAgentPoolProfile`
+- New field `ArtifactStreamingProfile`, `EnableCustomCATrust`, `GpuProfile`, `MessageOfTheDay`, `NodeInitializationTaints`, `PodIPAllocationMode`, `SecurityProfile`, `VirtualMachineNodesStatus`, `VirtualMachinesProfile`, `WindowsProfile` in struct `ManagedClusterAgentPoolProfileProperties`
+- New field `Logs` in struct `ManagedClusterAzureMonitorProfile`
+- New field `AppMonitoringOpenTelemetryMetrics` in struct `ManagedClusterAzureMonitorProfileMetrics`
+- New field `EffectiveNoProxy` in struct `ManagedClusterHTTPProxyConfig`
+- New field `AiToolchainOperatorProfile`, `BootstrapProfile`, `CreationData`, `EnableNamespaceResources`, `MetricsProfile`, `NodeProvisioningProfile`, `NodeResourceGroupProfile`, `SafeguardsProfile` in struct `ManagedClusterProperties`
+- New field `DaemonsetEvictionForEmptyNodes`, `DaemonsetEvictionForOccupiedNodes`, `IgnoreDaemonsetsUtilization` in struct `ManagedClusterPropertiesAutoScalerProfile`
+- New field `CustomCATrustCertificates`, `ImageIntegrity`, `NodeRestriction` in struct `ManagedClusterSecurityProfile`
+- New field `Version` in struct `ManagedClusterStorageProfileDiskCSIDriver`
+- New field `AddonAutoscaling` in struct `ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler`
+- New field `IgnorePodDisruptionBudget` in struct `ManagedClustersClientBeginDeleteOptions`
+- New field `KubeProxyConfig`, `Monitoring` in struct `NetworkProfile`
+
+
+## 4.8.0 (2024-03-22)
+### Features Added
+
+- New struct `ManagedClusterIngressProfile`
+- New struct `ManagedClusterIngressProfileWebAppRouting`
+- New field `IngressProfile` in struct `ManagedClusterProperties`
+
+
 ## 4.8.0-beta.1 (2024-02-23)
 ### Features Added
diff --git
a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go index 738657364e80..949c12ec336d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go @@ -49,7 +49,7 @@ func NewAgentPoolsClient(subscriptionID string, credential azcore.TokenCredentia // before cancellation can take place, an error is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -78,7 +78,7 @@ func (client *AgentPoolsClient) BeginAbortLatestOperation(ctx context.Context, r // before cancellation can take place, an error is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *AgentPoolsClient) abortLatestOperation(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginAbortLatestOperationOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginAbortLatestOperation" @@ -124,7 +124,7 @@ func (client *AgentPoolsClient) abortLatestOperationCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -133,7 +133,7 @@ func (client *AgentPoolsClient) abortLatestOperationCreateRequest(ctx context.Co // BeginCreateOrUpdate - Creates or updates an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -160,7 +160,7 @@ func (client *AgentPoolsClient) BeginCreateOrUpdate(ctx context.Context, resourc // CreateOrUpdate - Creates or updates an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *AgentPoolsClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool, options *AgentPoolsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginCreateOrUpdate" @@ -206,7 +206,7 @@ func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -218,7 +218,7 @@ func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Deletes an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -243,7 +243,7 @@ func (client *AgentPoolsClient) BeginDelete(ctx context.Context, resourceGroupNa // Delete - Deletes an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *AgentPoolsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginDelete" @@ -289,7 +289,7 @@ func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") if options != nil && options.IgnorePodDisruptionBudget != nil { reqQP.Set("ignore-pod-disruption-budget", strconv.FormatBool(*options.IgnorePodDisruptionBudget)) } @@ -301,7 +301,7 @@ func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourc // BeginDeleteMachines - Deletes specific machines in an agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -328,7 +328,7 @@ func (client *AgentPoolsClient) BeginDeleteMachines(ctx context.Context, resourc // DeleteMachines - Deletes specific machines in an agent pool. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *AgentPoolsClient) deleteMachines(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, machines AgentPoolDeleteMachinesParameter, options *AgentPoolsClientBeginDeleteMachinesOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginDeleteMachines" @@ -374,7 +374,7 @@ func (client *AgentPoolsClient) deleteMachinesCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, machines); err != nil { @@ -386,7 +386,7 @@ func (client *AgentPoolsClient) deleteMachinesCreateRequest(ctx context.Context, // Get - Gets the specified managed cluster agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -437,7 +437,7 @@ func (client *AgentPoolsClient) getCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -456,7 +456,7 @@ func (client *AgentPoolsClient) getHandleResponse(resp *http.Response) (AgentPoo // for more details about the version lifecycle. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - AgentPoolsClientGetAvailableAgentPoolVersionsOptions contains the optional parameters for the AgentPoolsClient.GetAvailableAgentPoolVersions @@ -503,7 +503,7 @@ func (client *AgentPoolsClient) getAvailableAgentPoolVersionsCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -521,7 +521,7 @@ func (client *AgentPoolsClient) getAvailableAgentPoolVersionsHandleResponse(resp // GetUpgradeProfile - Gets the upgrade profile for an agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. 
@@ -573,7 +573,7 @@ func (client *AgentPoolsClient) getUpgradeProfileCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -590,7 +590,7 @@ func (client *AgentPoolsClient) getUpgradeProfileHandleResponse(resp *http.Respo // NewListPager - Gets a list of agent pools in the specified managed cluster. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - AgentPoolsClientListOptions contains the optional parameters for the AgentPoolsClient.NewListPager method. @@ -637,7 +637,7 @@ func (client *AgentPoolsClient) listCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -657,7 +657,7 @@ func (client *AgentPoolsClient) listHandleResponse(resp *http.Response) (AgentPo // versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -686,7 +686,7 @@ func (client *AgentPoolsClient) BeginUpgradeNodeImageVersion(ctx context.Context // versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *AgentPoolsClient) upgradeNodeImageVersion(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginUpgradeNodeImageVersionOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginUpgradeNodeImageVersion" @@ -732,7 +732,7 @@ func (client *AgentPoolsClient) upgradeNodeImageVersionCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json index 5f0930eecb21..20f3b44115ca 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/containerservice/armcontainerservice", - "Tag": "go/resourcemanager/containerservice/armcontainerservice_5e026610aa" + "Tag": "go/resourcemanager/containerservice/armcontainerservice_40c86a2493" } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md index 3eac1efff175..38fbece836fe 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/d4205894880b989ede35d62d97c8e901ed14fb5a/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/d4205894880b989ede35d62d97c8e901ed14fb5a/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/44a81f85be1e4797fbf5e290fc6b41d48788a6ba/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/44a81f85be1e4797fbf5e290fc6b41d48788a6ba/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 4.8.0-beta.1 -tag: package-preview-2023-11 +module-version: 4.9.0-beta.1 +tag: package-preview-2024-01 ``` diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml index 1c3615696e32..176c023b2a07 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml @@ -21,8 +21,8 @@ pr: include: - sdk/resourcemanager/containerservice/armcontainerservice/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: IncludeRelease: true ServiceDirectory: 'resourcemanager/containerservice/armcontainerservice' diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go index 2935ce0978dc..2657efdadbc2 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go @@ -10,7 +10,7 @@ package armcontainerservice const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice" - moduleVersion = "v4.8.0-beta.1" + moduleVersion = "v4.9.0-beta.1" ) // AddonAutoscaling - Whether VPA add-on is enabled and configured to scale AKS-managed add-ons. @@ -90,6 +90,24 @@ func PossibleAgentPoolTypeValues() []AgentPoolType { } } +// ArtifactSource - The source where the artifacts are downloaded from. +type ArtifactSource string + +const ( + // ArtifactSourceCache - pull images from Azure Container Registry with cache + ArtifactSourceCache ArtifactSource = "Cache" + // ArtifactSourceDirect - pull images from Microsoft Artifact Registry + ArtifactSourceDirect ArtifactSource = "Direct" +) + +// PossibleArtifactSourceValues returns the possible values for the ArtifactSource const type. +func PossibleArtifactSourceValues() []ArtifactSource { + return []ArtifactSource{ + ArtifactSourceCache, + ArtifactSourceDirect, + } +} + // BackendPoolType - The type of the managed inbound Load Balancer BackendPool. type BackendPoolType string @@ -789,6 +807,30 @@ func PossibleOutboundTypeValues() []OutboundType { } } +// PodIPAllocationMode - The IP allocation mode for pods in the agent pool. Must be used with podSubnetId. The default is +// 'DynamicIndividual'. +type PodIPAllocationMode string + +const ( + // PodIPAllocationModeDynamicIndividual - Each pod gets a single IP address assigned. This is better for maximizing a small + // to medium subnet of size /16 or smaller. The Azure CNI cluster with dynamic IP allocation defaults to this mode if the + // customer does not explicitly specify a podIPAllocationMode + PodIPAllocationModeDynamicIndividual PodIPAllocationMode = "DynamicIndividual" + // PodIPAllocationModeStaticBlock - Each node is statically allocated CIDR block(s) of size /28 = 16 IPs per block to satisfy + // the maxPods per node. Number of CIDR blocks >= (maxPods / 16). The block, rather than a single IP, counts against the Azure + // Vnet Private IP limit of 65K. 
Therefore block mode is suitable for running larger workloads with more than the current + // limit of 65K pods in a cluster. This mode is better suited to scale with larger subnets of /15 or bigger + PodIPAllocationModeStaticBlock PodIPAllocationMode = "StaticBlock" +) + +// PossiblePodIPAllocationModeValues returns the possible values for the PodIPAllocationMode const type. +func PossiblePodIPAllocationModeValues() []PodIPAllocationMode { + return []PodIPAllocationMode{ + PodIPAllocationModeDynamicIndividual, + PodIPAllocationModeStaticBlock, + } +} + // PrivateEndpointConnectionProvisioningState - The current provisioning state. type PrivateEndpointConnectionProvisioningState string diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/machines_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/machines_client.go index 7c990cf79175..5e769e2f6eda 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/machines_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/machines_client.go @@ -46,7 +46,7 @@ func NewMachinesClient(subscriptionID string, credential azcore.TokenCredential, // Get - Get a specific machine in the specified agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -102,7 +102,7 @@ func (client *MachinesClient) getCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -119,7 +119,7 @@ func (client *MachinesClient) getHandleResponse(resp *http.Response) (MachinesCl // NewListPager - Gets a list of machines in the specified agent pool. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. 
@@ -171,7 +171,7 @@ func (client *MachinesClient) listCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go index e3846ef77d5e..ea12a7b1d289 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go @@ -46,7 +46,7 @@ func NewMaintenanceConfigurationsClient(subscriptionID string, credential azcore // CreateOrUpdate - Creates or updates a maintenance configuration in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. @@ -99,7 +99,7 @@ func (client *MaintenanceConfigurationsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -120,7 +120,7 @@ func (client *MaintenanceConfigurationsClient) createOrUpdateHandleResponse(resp // Delete - Deletes a maintenance configuration. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. @@ -171,7 +171,7 @@ func (client *MaintenanceConfigurationsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -180,7 +180,7 @@ func (client *MaintenanceConfigurationsClient) deleteCreateRequest(ctx context.C // Get - Gets the specified maintenance configuration of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. 
@@ -232,7 +232,7 @@ func (client *MaintenanceConfigurationsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -249,7 +249,7 @@ func (client *MaintenanceConfigurationsClient) getHandleResponse(resp *http.Resp // NewListByManagedClusterPager - Gets a list of maintenance configurations in the specified managed cluster. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - MaintenanceConfigurationsClientListByManagedClusterOptions contains the optional parameters for the MaintenanceConfigurationsClient.NewListByManagedClusterPager @@ -297,7 +297,7 @@ func (client *MaintenanceConfigurationsClient) listByManagedClusterCreateRequest return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go index ecbb1143397d..cd64932611a5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go @@ -49,7 +49,7 @@ func NewManagedClustersClient(subscriptionID string, credential azcore.TokenCred // completes before cancellation can take place, an error is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginAbortLatestOperationOptions contains the optional parameters for the ManagedClustersClient.BeginAbortLatestOperation @@ -77,7 +77,7 @@ func (client *ManagedClustersClient) BeginAbortLatestOperation(ctx context.Conte // completes before cancellation can take place, an error is returned. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) abortLatestOperation(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginAbortLatestOperationOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginAbortLatestOperation" @@ -119,7 +119,7 @@ func (client *ManagedClustersClient) abortLatestOperationCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -128,7 +128,7 @@ func (client *ManagedClustersClient) abortLatestOperationCreateRequest(ctx conte // BeginCreateOrUpdate - Creates or updates a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The managed cluster to create or update. @@ -154,7 +154,7 @@ func (client *ManagedClustersClient) BeginCreateOrUpdate(ctx context.Context, re // CreateOrUpdate - Creates or updates a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster, options *ManagedClustersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginCreateOrUpdate" @@ -196,7 +196,7 @@ func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -208,7 +208,7 @@ func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Con // BeginDelete - Deletes a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginDeleteOptions contains the optional parameters for the ManagedClustersClient.BeginDelete @@ -233,7 +233,7 @@ func (client *ManagedClustersClient) BeginDelete(ctx context.Context, resourceGr // Delete - Deletes a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginDelete" @@ -275,7 +275,7 @@ func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") if options != nil && options.IgnorePodDisruptionBudget != nil { reqQP.Set("ignore-pod-disruption-budget", strconv.FormatBool(*options.IgnorePodDisruptionBudget)) } @@ -287,7 +287,7 @@ func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, re // Get - Gets a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientGetOptions contains the optional parameters for the ManagedClustersClient.Get method. @@ -333,7 +333,7 @@ func (client *ManagedClustersClient) getCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -353,7 +353,7 @@ func (client *ManagedClustersClient) getHandleResponse(resp *http.Response) (Man // [https://docs.microsoft.com/rest/api/aks/managedclusters/listclusteradmincredentials] . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - roleName - The name of the role for managed cluster accessProfile resource. @@ -405,7 +405,7 @@ func (client *ManagedClustersClient) getAccessProfileCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -423,7 +423,7 @@ func (client *ManagedClustersClient) getAccessProfileHandleResponse(resp *http.R // GetCommandResult - Gets the results of a command which has been run on the Managed Cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - commandID - Id of the command. 
@@ -475,7 +475,7 @@ func (client *ManagedClustersClient) getCommandResultCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -496,7 +496,7 @@ func (client *ManagedClustersClient) getCommandResultHandleResponse(resp *http.R // GetGuardrailsVersions - Contains Guardrails version along with its support info and whether it is a default version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - version - Safeguards version // - options - ManagedClustersClientGetGuardrailsVersionsOptions contains the optional parameters for the ManagedClustersClient.GetGuardrailsVersions @@ -543,7 +543,7 @@ func (client *ManagedClustersClient) getGuardrailsVersionsCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -562,7 +562,7 @@ func (client *ManagedClustersClient) getGuardrailsVersionsHandleResponse(resp *h // and available upgrades // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - mode - The mode of the mesh. // - options - ManagedClustersClientGetMeshRevisionProfileOptions contains the optional parameters for the ManagedClustersClient.GetMeshRevisionProfile @@ -609,7 +609,7 @@ func (client *ManagedClustersClient) getMeshRevisionProfileCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -627,7 +627,7 @@ func (client *ManagedClustersClient) getMeshRevisionProfileHandleResponse(resp * // GetMeshUpgradeProfile - Gets available upgrades for a service mesh in a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - mode - The mode of the mesh. @@ -679,7 +679,7 @@ func (client *ManagedClustersClient) getMeshUpgradeProfileCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -697,7 +697,7 @@ func (client *ManagedClustersClient) getMeshUpgradeProfileHandleResponse(resp *h // GetOSOptions - Gets supported OS options in the specified subscription. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. 
// - options - ManagedClustersClientGetOSOptionsOptions contains the optional parameters for the ManagedClustersClient.GetOSOptions // method. @@ -739,7 +739,7 @@ func (client *ManagedClustersClient) getOSOptionsCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") if options != nil && options.ResourceType != nil { reqQP.Set("resource-type", *options.ResourceType) } @@ -760,7 +760,7 @@ func (client *ManagedClustersClient) getOSOptionsHandleResponse(resp *http.Respo // GetSafeguardsVersions - Contains Safeguards version along with its support info and whether it is a default version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - version - Safeguards version // - options - ManagedClustersClientGetSafeguardsVersionsOptions contains the optional parameters for the ManagedClustersClient.GetSafeguardsVersions @@ -807,7 +807,7 @@ func (client *ManagedClustersClient) getSafeguardsVersionsCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -825,7 +825,7 @@ func (client *ManagedClustersClient) getSafeguardsVersionsHandleResponse(resp *h // GetUpgradeProfile - Gets the upgrade profile of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientGetUpgradeProfileOptions contains the optional parameters for the ManagedClustersClient.GetUpgradeProfile @@ -872,7 +872,7 @@ func (client *ManagedClustersClient) getUpgradeProfileCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -889,7 +889,7 @@ func (client *ManagedClustersClient) getUpgradeProfileHandleResponse(resp *http. // NewListPager - Gets a list of managed clusters in the specified subscription. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - options - ManagedClustersClientListOptions contains the optional parameters for the ManagedClustersClient.NewListPager // method. 
func (client *ManagedClustersClient) NewListPager(options *ManagedClustersClientListOptions) *runtime.Pager[ManagedClustersClientListResponse] { @@ -927,7 +927,7 @@ func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, opti return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -944,7 +944,7 @@ func (client *ManagedClustersClient) listHandleResponse(resp *http.Response) (Ma // NewListByResourceGroupPager - Lists managed clusters in the specified subscription and resource group. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - ManagedClustersClientListByResourceGroupOptions contains the optional parameters for the ManagedClustersClient.NewListByResourceGroupPager // method. @@ -987,7 +987,7 @@ func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1005,7 +1005,7 @@ func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *htt // ListClusterAdminCredentials - Lists the admin credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterAdminCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterAdminCredentials @@ -1052,7 +1052,7 @@ func (client *ManagedClustersClient) listClusterAdminCredentialsCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -1073,7 +1073,7 @@ func (client *ManagedClustersClient) listClusterAdminCredentialsHandleResponse(r // ListClusterMonitoringUserCredentials - Lists the cluster monitoring user credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientListClusterMonitoringUserCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterMonitoringUserCredentials @@ -1120,7 +1120,7 @@ func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -1141,7 +1141,7 @@ func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsHandleR // ListClusterUserCredentials - Lists the user credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterUserCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterUserCredentials @@ -1188,7 +1188,7 @@ func (client *ManagedClustersClient) listClusterUserCredentialsCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") if options != nil && options.Format != nil { reqQP.Set("format", string(*options.Format)) } @@ -1212,7 +1212,7 @@ func (client *ManagedClustersClient) listClusterUserCredentialsHandleResponse(re // NewListGuardrailsVersionsPager - Contains list of Guardrails version along with its support info and whether it is a default // version. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - options - ManagedClustersClientListGuardrailsVersionsOptions contains the optional parameters for the ManagedClustersClient.NewListGuardrailsVersionsPager // method. @@ -1255,7 +1255,7 @@ func (client *ManagedClustersClient) listGuardrailsVersionsCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1274,7 +1274,7 @@ func (client *ManagedClustersClient) listGuardrailsVersionsHandleResponse(resp * // upgrades, and details on preview status of the version // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - options - ManagedClustersClientListKubernetesVersionsOptions contains the optional parameters for the ManagedClustersClient.ListKubernetesVersions // method. 
@@ -1316,7 +1316,7 @@ func (client *ManagedClustersClient) listKubernetesVersionsCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1334,7 +1334,7 @@ func (client *ManagedClustersClient) listKubernetesVersionsHandleResponse(resp * // NewListMeshRevisionProfilesPager - Contains extra metadata on each revision, including supported revisions, cluster compatibility // and available upgrades // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - options - ManagedClustersClientListMeshRevisionProfilesOptions contains the optional parameters for the ManagedClustersClient.NewListMeshRevisionProfilesPager // method. @@ -1377,7 +1377,7 @@ func (client *ManagedClustersClient) listMeshRevisionProfilesCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1394,7 +1394,7 @@ func (client *ManagedClustersClient) listMeshRevisionProfilesHandleResponse(resp // NewListMeshUpgradeProfilesPager - Lists available upgrades for all service meshes in a specific cluster. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListMeshUpgradeProfilesOptions contains the optional parameters for the ManagedClustersClient.NewListMeshUpgradeProfilesPager @@ -1442,7 +1442,7 @@ func (client *ManagedClustersClient) listMeshUpgradeProfilesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1460,7 +1460,7 @@ func (client *ManagedClustersClient) listMeshUpgradeProfilesHandleResponse(resp // NewListOutboundNetworkDependenciesEndpointsPager - Gets a list of egress endpoints (network endpoints of all outbound dependencies) // in the specified managed cluster. The operation returns properties of each egress endpoint. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientListOutboundNetworkDependenciesEndpointsOptions contains the optional parameters for the @@ -1508,7 +1508,7 @@ func (client *ManagedClustersClient) listOutboundNetworkDependenciesEndpointsCre return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1526,7 +1526,7 @@ func (client *ManagedClustersClient) listOutboundNetworkDependenciesEndpointsHan // NewListSafeguardsVersionsPager - Contains list of Safeguards version along with its support info and whether it is a default // version. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - options - ManagedClustersClientListSafeguardsVersionsOptions contains the optional parameters for the ManagedClustersClient.NewListSafeguardsVersionsPager // method. @@ -1569,7 +1569,7 @@ func (client *ManagedClustersClient) listSafeguardsVersionsCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1588,7 +1588,7 @@ func (client *ManagedClustersClient) listSafeguardsVersionsHandleResponse(resp * // [https://aka.ms/aks-managed-aad] to update your cluster with AKS-managed Azure AD. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The AAD profile to set on the Managed Cluster @@ -1616,7 +1616,7 @@ func (client *ManagedClustersClient) BeginResetAADProfile(ctx context.Context, r // to update your cluster with AKS-managed Azure AD. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) resetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile, options *ManagedClustersClientBeginResetAADProfileOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginResetAADProfile" @@ -1658,7 +1658,7 @@ func (client *ManagedClustersClient) resetAADProfileCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1670,7 +1670,7 @@ func (client *ManagedClustersClient) resetAADProfileCreateRequest(ctx context.Co // BeginResetServicePrincipalProfile - This action cannot be performed on a cluster that is not using a service principal // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - resourceName - The name of the managed cluster resource. // - parameters - The service principal profile to set on the managed cluster. @@ -1697,7 +1697,7 @@ func (client *ManagedClustersClient) BeginResetServicePrincipalProfile(ctx conte // ResetServicePrincipalProfile - This action cannot be performed on a cluster that is not using a service principal // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) resetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile, options *ManagedClustersClientBeginResetServicePrincipalProfileOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginResetServicePrincipalProfile" @@ -1739,7 +1739,7 @@ func (client *ManagedClustersClient) resetServicePrincipalProfileCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1752,7 +1752,7 @@ func (client *ManagedClustersClient) resetServicePrincipalProfileCreateRequest(c // more details about rotating managed cluster certificates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginRotateClusterCertificatesOptions contains the optional parameters for the ManagedClustersClient.BeginRotateClusterCertificates @@ -1779,7 +1779,7 @@ func (client *ManagedClustersClient) BeginRotateClusterCertificates(ctx context. // details about rotating managed cluster certificates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) rotateClusterCertificates(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginRotateClusterCertificatesOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRotateClusterCertificates" @@ -1821,7 +1821,7 @@ func (client *ManagedClustersClient) rotateClusterCertificatesCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1830,7 +1830,7 @@ func (client *ManagedClustersClient) rotateClusterCertificatesCreateRequest(ctx // BeginRotateServiceAccountSigningKeys - Rotates the service account signing keys of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientBeginRotateServiceAccountSigningKeysOptions contains the optional parameters for the ManagedClustersClient.BeginRotateServiceAccountSigningKeys @@ -1856,7 +1856,7 @@ func (client *ManagedClustersClient) BeginRotateServiceAccountSigningKeys(ctx co // RotateServiceAccountSigningKeys - Rotates the service account signing keys of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) rotateServiceAccountSigningKeys(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginRotateServiceAccountSigningKeysOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRotateServiceAccountSigningKeys" @@ -1898,7 +1898,7 @@ func (client *ManagedClustersClient) rotateServiceAccountSigningKeysCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1909,7 +1909,7 @@ func (client *ManagedClustersClient) rotateServiceAccountSigningKeysCreateReques // [https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview]. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - requestPayload - The run command request @@ -1938,7 +1938,7 @@ func (client *ManagedClustersClient) BeginRunCommand(ctx context.Context, resour // [https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview]. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) runCommand(ctx context.Context, resourceGroupName string, resourceName string, requestPayload RunCommandRequest, options *ManagedClustersClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRunCommand" @@ -1980,7 +1980,7 @@ func (client *ManagedClustersClient) runCommandCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, requestPayload); err != nil { @@ -1993,7 +1993,7 @@ func (client *ManagedClustersClient) runCommandCreateRequest(ctx context.Context // a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientBeginStartOptions contains the optional parameters for the ManagedClustersClient.BeginStart @@ -2020,7 +2020,7 @@ func (client *ManagedClustersClient) BeginStart(ctx context.Context, resourceGro // a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) start(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginStart" @@ -2062,7 +2062,7 @@ func (client *ManagedClustersClient) startCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -2074,7 +2074,7 @@ func (client *ManagedClustersClient) startCreateRequest(ctx context.Context, res // for more details about stopping a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginStopOptions contains the optional parameters for the ManagedClustersClient.BeginStop @@ -2103,7 +2103,7 @@ func (client *ManagedClustersClient) BeginStop(ctx context.Context, resourceGrou // for more details about stopping a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) stop(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginStopOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginStop" @@ -2145,7 +2145,7 @@ func (client *ManagedClustersClient) stopCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -2154,7 +2154,7 @@ func (client *ManagedClustersClient) stopCreateRequest(ctx context.Context, reso // BeginUpdateTags - Updates tags on a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update Managed Cluster Tags operation. @@ -2180,7 +2180,7 @@ func (client *ManagedClustersClient) BeginUpdateTags(ctx context.Context, resour // UpdateTags - Updates tags on a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *ManagedClustersClient) updateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject, options *ManagedClustersClientBeginUpdateTagsOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginUpdateTags" @@ -2222,7 +2222,7 @@ func (client *ManagedClustersClient) updateTagsCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclustersnapshots_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclustersnapshots_client.go index 677aa9964ce1..5137324253fc 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclustersnapshots_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclustersnapshots_client.go @@ -46,7 +46,7 @@ func NewManagedClusterSnapshotsClient(subscriptionID string, credential azcore.T // CreateOrUpdate - Creates or updates a managed cluster snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The managed cluster snapshot to create or update. @@ -94,7 +94,7 @@ func (client *ManagedClusterSnapshotsClient) createOrUpdateCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *ManagedClusterSnapshotsClient) createOrUpdateHandleResponse(resp * // Delete - Deletes a managed cluster snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClusterSnapshotsClientDeleteOptions contains the optional parameters for the ManagedClusterSnapshotsClient.Delete @@ -161,7 +161,7 @@ func (client *ManagedClusterSnapshotsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -170,7 +170,7 @@ func (client *ManagedClusterSnapshotsClient) deleteCreateRequest(ctx context.Con // Get - Gets a managed cluster snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClusterSnapshotsClientGetOptions contains the optional parameters for the ManagedClusterSnapshotsClient.Get @@ -217,7 +217,7 @@ func (client *ManagedClusterSnapshotsClient) getCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -234,7 +234,7 @@ func (client *ManagedClusterSnapshotsClient) getHandleResponse(resp *http.Respon // NewListPager - Gets a list of managed cluster snapshots in the specified subscription. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - options - ManagedClusterSnapshotsClientListOptions contains the optional parameters for the ManagedClusterSnapshotsClient.NewListPager // method. func (client *ManagedClusterSnapshotsClient) NewListPager(options *ManagedClusterSnapshotsClientListOptions) *runtime.Pager[ManagedClusterSnapshotsClientListResponse] { @@ -272,7 +272,7 @@ func (client *ManagedClusterSnapshotsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -289,7 +289,7 @@ func (client *ManagedClusterSnapshotsClient) listHandleResponse(resp *http.Respo // NewListByResourceGroupPager - Lists managed cluster snapshots in the specified subscription and resource group. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - ManagedClusterSnapshotsClientListByResourceGroupOptions contains the optional parameters for the ManagedClusterSnapshotsClient.NewListByResourceGroupPager // method. @@ -332,7 +332,7 @@ func (client *ManagedClusterSnapshotsClient) listByResourceGroupCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -350,7 +350,7 @@ func (client *ManagedClusterSnapshotsClient) listByResourceGroupHandleResponse(r // UpdateTags - Updates tags on a managed cluster snapshot. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update managed cluster snapshot Tags operation. @@ -398,7 +398,7 @@ func (client *ManagedClusterSnapshotsClient) updateTagsCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go index 9abe513571f5..c484b3d724c9 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go @@ -967,6 +967,9 @@ type ManagedClusterAgentPoolProfile struct { // For more information see upgrading a node pool [https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool]. OrchestratorVersion *string + // The IP allocation mode for pods in the agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. + PodIPAllocationMode *PodIPAllocationMode + // If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: // /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} PodSubnetID *string @@ -1165,6 +1168,9 @@ type ManagedClusterAgentPoolProfileProperties struct { // For more information see upgrading a node pool [https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool]. OrchestratorVersion *string + // The IP allocation mode for pods in the agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. + PodIPAllocationMode *PodIPAllocationMode + // If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: // /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} PodSubnetID *string @@ -1332,6 +1338,15 @@ type ManagedClusterAzureMonitorProfileWindowsHostLogs struct { Enabled *bool } +// ManagedClusterBootstrapProfile - The bootstrap profile. +type ManagedClusterBootstrapProfile struct { + // The source where the artifacts are downloaded from. + ArtifactSource *ArtifactSource + + // The resource Id of Azure Container Registry. The registry must have private network access, premium SKU and zone redundancy. + ContainerRegistryID *string +} + // ManagedClusterCostAnalysis - The cost analysis configuration for the cluster type ManagedClusterCostAnalysis struct { // The Managed Cluster sku.tier must be set to 'Standard' to enable this feature. 
Enabling this will add Kubernetes Namespace @@ -1638,6 +1653,9 @@ type ManagedClusterProperties struct { // Prometheus addon profile for the container service cluster AzureMonitorProfile *ManagedClusterAzureMonitorProfile + // Profile of the cluster bootstrap configuration. + BootstrapProfile *ManagedClusterBootstrapProfile + // CreationData to be used to specify the source Snapshot ID if the cluster will be created/upgraded using a snapshot. CreationData *CreationData diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go index e9a87957a15b..1a1639ccac5a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go @@ -2333,6 +2333,7 @@ func (m ManagedClusterAgentPoolProfile) MarshalJSON() ([]byte, error) { populate(objectMap, "osSKU", m.OSSKU) populate(objectMap, "osType", m.OSType) populate(objectMap, "orchestratorVersion", m.OrchestratorVersion) + populate(objectMap, "podIPAllocationMode", m.PodIPAllocationMode) populate(objectMap, "podSubnetID", m.PodSubnetID) populate(objectMap, "powerState", m.PowerState) populate(objectMap, "provisioningState", m.ProvisioningState) @@ -2468,6 +2469,9 @@ func (m *ManagedClusterAgentPoolProfile) UnmarshalJSON(data []byte) error { case "orchestratorVersion": err = unpopulate(val, "OrchestratorVersion", &m.OrchestratorVersion) delete(rawMsg, key) + case "podIPAllocationMode": + err = unpopulate(val, "PodIPAllocationMode", &m.PodIPAllocationMode) + delete(rawMsg, key) case "podSubnetID": err = unpopulate(val, "PodSubnetID", &m.PodSubnetID) delete(rawMsg, key) @@ -2567,6 +2571,7 @@ func (m ManagedClusterAgentPoolProfileProperties) MarshalJSON() ([]byte, error) populate(objectMap, "osSKU", m.OSSKU) populate(objectMap, "osType", m.OSType) populate(objectMap, "orchestratorVersion", m.OrchestratorVersion) + populate(objectMap, "podIPAllocationMode", m.PodIPAllocationMode) populate(objectMap, "podSubnetID", m.PodSubnetID) populate(objectMap, "powerState", m.PowerState) populate(objectMap, "provisioningState", m.ProvisioningState) @@ -2699,6 +2704,9 @@ func (m *ManagedClusterAgentPoolProfileProperties) UnmarshalJSON(data []byte) er case "orchestratorVersion": err = unpopulate(val, "OrchestratorVersion", &m.OrchestratorVersion) delete(rawMsg, key) + case "podIPAllocationMode": + err = unpopulate(val, "PodIPAllocationMode", &m.PodIPAllocationMode) + delete(rawMsg, key) case "podSubnetID": err = unpopulate(val, "PodSubnetID", &m.PodSubnetID) delete(rawMsg, key) @@ -3036,6 +3044,37 @@ func (m *ManagedClusterAzureMonitorProfileWindowsHostLogs) UnmarshalJSON(data [] return nil } +// MarshalJSON implements the json.Marshaller interface for type ManagedClusterBootstrapProfile. +func (m ManagedClusterBootstrapProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "artifactSource", m.ArtifactSource) + populate(objectMap, "containerRegistryId", m.ContainerRegistryID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedClusterBootstrapProfile. 
+func (m *ManagedClusterBootstrapProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "artifactSource": + err = unpopulate(val, "ArtifactSource", &m.ArtifactSource) + delete(rawMsg, key) + case "containerRegistryId": + err = unpopulate(val, "ContainerRegistryID", &m.ContainerRegistryID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ManagedClusterCostAnalysis. func (m ManagedClusterCostAnalysis) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -3852,6 +3891,7 @@ func (m ManagedClusterProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "autoUpgradeProfile", m.AutoUpgradeProfile) populate(objectMap, "azureMonitorProfile", m.AzureMonitorProfile) populate(objectMap, "azurePortalFQDN", m.AzurePortalFQDN) + populate(objectMap, "bootstrapProfile", m.BootstrapProfile) populate(objectMap, "creationData", m.CreationData) populate(objectMap, "currentKubernetesVersion", m.CurrentKubernetesVersion) populate(objectMap, "dnsPrefix", m.DNSPrefix) @@ -3929,6 +3969,9 @@ func (m *ManagedClusterProperties) UnmarshalJSON(data []byte) error { case "azurePortalFQDN": err = unpopulate(val, "AzurePortalFQDN", &m.AzurePortalFQDN) delete(rawMsg, key) + case "bootstrapProfile": + err = unpopulate(val, "BootstrapProfile", &m.BootstrapProfile) + delete(rawMsg, key) case "creationData": err = unpopulate(val, "CreationData", &m.CreationData) delete(rawMsg, key) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go index bfc700c76a31..dbf7f8f10046 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Gets a list of operations. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. 
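The models.go and models_serde.go hunks above introduce PodIPAllocationMode on the agent pool profiles and a new ManagedClusterBootstrapProfile wired into ManagedClusterProperties. A minimal sketch of how a caller might populate these fields follows; it is not part of this PR. The placeholder resource IDs are hypothetical, the canonical github.com import path is used instead of the vendored github.com form, and the enum values are passed through the (assumed) string-backed types because no generated constants appear in this diff: "DynamicIndividual" is the documented default for PodIPAllocationMode, while "Cache" is an assumed ArtifactSource value.

package example

import (
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
)

// agentPoolWithPodIPAllocation sketches the new PodIPAllocationMode field, which the
// field's doc comment says must be combined with podSubnetId.
func agentPoolWithPodIPAllocation() armcontainerservice.ManagedClusterAgentPoolProfile {
    return armcontainerservice.ManagedClusterAgentPoolProfile{
        Name:                to.Ptr("nodepool1"),
        PodSubnetID:         to.Ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<podSubnet>"),
        PodIPAllocationMode: to.Ptr(armcontainerservice.PodIPAllocationMode("DynamicIndividual")),
    }
}

// bootstrapProfile sketches the new ManagedClusterBootstrapProfile; "Cache" is an
// assumed ArtifactSource value and the registry ID is a placeholder.
func bootstrapProfile() *armcontainerservice.ManagedClusterBootstrapProfile {
    return &armcontainerservice.ManagedClusterBootstrapProfile{
        ArtifactSource:      to.Ptr(armcontainerservice.ArtifactSource("Cache")),
        ContainerRegistryID: to.Ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ContainerRegistry/registries/<acr>"),
    }
}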
func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -73,7 +73,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operationstatusresult_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operationstatusresult_client.go index bf79b9b6dd18..8492ab2fd05d 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operationstatusresult_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operationstatusresult_client.go @@ -46,7 +46,7 @@ func NewOperationStatusResultClient(subscriptionID string, credential azcore.Tok // Get - Get the status of a specific operation in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - operationID - The ID of an ongoing async operation. @@ -98,7 +98,7 @@ func (client *OperationStatusResultClient) getCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -116,7 +116,7 @@ func (client *OperationStatusResultClient) getHandleResponse(resp *http.Response // GetByAgentPool - Get the status of a specific operation in the specified agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -173,7 +173,7 @@ func (client *OperationStatusResultClient) getByAgentPoolCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -190,7 +190,7 @@ func (client *OperationStatusResultClient) getByAgentPoolHandleResponse(resp *ht // NewListPager - Gets a list of operations in the specified managedCluster // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - OperationStatusResultClientListOptions contains the optional parameters for the OperationStatusResultClient.NewListPager @@ -238,7 +238,7 @@ func (client *OperationStatusResultClient) listCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go index d20f4f69db3d..2e59d7865f0a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go @@ -46,7 +46,7 @@ func NewPrivateEndpointConnectionsClient(subscriptionID string, credential azcor // BeginDelete - Deletes a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -72,7 +72,7 @@ func (client *PrivateEndpointConnectionsClient) BeginDelete(ctx context.Context, // Delete - Deletes a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *PrivateEndpointConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "PrivateEndpointConnectionsClient.BeginDelete" @@ -118,7 +118,7 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -127,7 +127,7 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. // Get - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. 
@@ -179,7 +179,7 @@ func (client *PrivateEndpointConnectionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -197,7 +197,7 @@ func (client *PrivateEndpointConnectionsClient) getHandleResponse(resp *http.Res // List - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.List @@ -244,7 +244,7 @@ func (client *PrivateEndpointConnectionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -262,7 +262,7 @@ func (client *PrivateEndpointConnectionsClient) listHandleResponse(resp *http.Re // Update - Updates a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -315,7 +315,7 @@ func (client *PrivateEndpointConnectionsClient) updateCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go index 19222b1656a5..9b8014d945fe 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go @@ -46,7 +46,7 @@ func NewPrivateLinkResourcesClient(subscriptionID string, credential azcore.Toke // List - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - resourceName - The name of the managed cluster resource. // - options - PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.List @@ -93,7 +93,7 @@ func (client *PrivateLinkResourcesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go index d30ebf05b25c..4400fafdfcc2 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go @@ -46,7 +46,7 @@ func NewResolvePrivateLinkServiceIDClient(subscriptionID string, credential azco // POST - Gets the private link service ID for the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters required in order to resolve a private link service ID. @@ -94,7 +94,7 @@ func (client *ResolvePrivateLinkServiceIDClient) postCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go index c0cb5e2f3514..9ba749d4c716 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go @@ -46,7 +46,7 @@ func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential // CreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The snapshot to create or update. 
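The SnapshotsClient hunks that follow are, like the rest of this file, pure api-version bumps, so existing callers keep working. A minimal, hypothetical sketch of the CreateOrUpdate call described by the doc comment above (resource group, snapshot name, snapshot body) is shown here under the assumption that the Snapshot, SnapshotProperties, and CreationData shapes match the current generated models; all names and IDs are placeholders and the canonical github.com import path is used.

package example

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
)

// createNodePoolSnapshot sketches SnapshotsClient.CreateOrUpdate; the api-version sent on
// the wire is whatever createOrUpdateCreateRequest sets (2024-01-02-preview after this PR).
func createNodePoolSnapshot(ctx context.Context, subscriptionID string) {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcontainerservice.NewSnapshotsClient(subscriptionID, cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    _, err = client.CreateOrUpdate(ctx, "my-rg", "my-snapshot", armcontainerservice.Snapshot{
        Location: to.Ptr("eastus"),
        Properties: &armcontainerservice.SnapshotProperties{
            // Hypothetical source agent pool; CreationData carries the source resource ID.
            CreationData: &armcontainerservice.CreationData{
                SourceResourceID: to.Ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ContainerService/managedClusters/<cluster>/agentPools/<pool>"),
            },
        },
    }, nil)
    if err != nil {
        log.Fatal(err)
    }
}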
@@ -94,7 +94,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *SnapshotsClient) createOrUpdateHandleResponse(resp *http.Response) // Delete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - SnapshotsClientDeleteOptions contains the optional parameters for the SnapshotsClient.Delete method. @@ -160,7 +160,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -169,7 +169,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource // Get - Gets a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - SnapshotsClientGetOptions contains the optional parameters for the SnapshotsClient.Get method. @@ -215,7 +215,7 @@ func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -232,7 +232,7 @@ func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (Snapshots // NewListPager - Gets a list of snapshots in the specified subscription. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.NewListPager method. func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ @@ -269,7 +269,7 @@ func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *S return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -286,7 +286,7 @@ func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (Snapshot // NewListByResourceGroupPager - Lists snapshots in the specified subscription and resource group. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.NewListByResourceGroupPager // method. @@ -329,7 +329,7 @@ func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -347,7 +347,7 @@ func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Resp // UpdateTags - Updates tags on a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update snapshot Tags operation. @@ -394,7 +394,7 @@ func (client *SnapshotsClient) updateTagsCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go index 13e18a3767ba..064f89070bcf 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go @@ -46,7 +46,7 @@ func NewTrustedAccessRoleBindingsClient(subscriptionID string, credential azcore // BeginCreateOrUpdate - Create or update a trusted access role binding // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. @@ -73,7 +73,7 @@ func (client *TrustedAccessRoleBindingsClient) BeginCreateOrUpdate(ctx context.C // CreateOrUpdate - Create or update a trusted access role binding // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *TrustedAccessRoleBindingsClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, trustedAccessRoleBindingName string, trustedAccessRoleBinding TrustedAccessRoleBinding, options *TrustedAccessRoleBindingsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "TrustedAccessRoleBindingsClient.BeginCreateOrUpdate" @@ -119,7 +119,7 @@ func (client *TrustedAccessRoleBindingsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, trustedAccessRoleBinding); err != nil { @@ -131,7 +131,7 @@ func (client *TrustedAccessRoleBindingsClient) createOrUpdateCreateRequest(ctx c // BeginDelete - Delete a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. @@ -157,7 +157,7 @@ func (client *TrustedAccessRoleBindingsClient) BeginDelete(ctx context.Context, // Delete - Delete a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview func (client *TrustedAccessRoleBindingsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, trustedAccessRoleBindingName string, options *TrustedAccessRoleBindingsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "TrustedAccessRoleBindingsClient.BeginDelete" @@ -203,7 +203,7 @@ func (client *TrustedAccessRoleBindingsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *TrustedAccessRoleBindingsClient) deleteCreateRequest(ctx context.C // Get - Get a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. 
@@ -264,7 +264,7 @@ func (client *TrustedAccessRoleBindingsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -281,7 +281,7 @@ func (client *TrustedAccessRoleBindingsClient) getHandleResponse(resp *http.Resp // NewListPager - List trusted access role bindings. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - TrustedAccessRoleBindingsClientListOptions contains the optional parameters for the TrustedAccessRoleBindingsClient.NewListPager @@ -329,7 +329,7 @@ func (client *TrustedAccessRoleBindingsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go index b50cc3b2d22c..8f68e24d37d6 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go @@ -45,7 +45,7 @@ func NewTrustedAccessRolesClient(subscriptionID string, credential azcore.TokenC // NewListPager - List supported trusted access roles. // -// Generated from API version 2023-11-02-preview +// Generated from API version 2024-01-02-preview // - location - The name of the Azure region. // - options - TrustedAccessRolesClientListOptions contains the optional parameters for the TrustedAccessRolesClient.NewListPager // method. 
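Every NewListPager touched in these hunks changes only the api-version set inside its *CreateRequest helper, so the paging contract is unchanged. As a small illustration, a sketch of iterating the TrustedAccessRolesClient pager (whose NewListPager doc comment appears just above) follows; the region, credential setup, and subscription ID are hypothetical placeholders and the canonical github.com import path is used.

package example

import (
    "context"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
)

// listTrustedAccessRoles walks the generated pager; pagination and the api-version query
// parameter are handled inside listCreateRequest, so callers only use More/NextPage.
func listTrustedAccessRoles(ctx context.Context, subscriptionID string) {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcontainerservice.NewTrustedAccessRolesClient(subscriptionID, cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    pager := client.NewListPager("eastus", nil)
    for pager.More() {
        page, err := pager.NextPage(ctx)
        if err != nil {
            log.Fatal(err)
        }
        for _, role := range page.Value {
            if role != nil && role.Name != nil {
                fmt.Println(*role.Name)
            }
        }
    }
}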
@@ -88,7 +88,7 @@ func (client *TrustedAccessRolesClient) listCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-02-preview") + reqQP.Set("api-version", "2024-01-02-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/CHANGELOG.md deleted file mode 100644 index 52911e4cc5e4..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/CHANGELOG.md +++ /dev/null @@ -1,2 +0,0 @@ -# Change History - diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json deleted file mode 100644 index b6d9ac079390..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "ea5bc27ee9cadeb67767d774c82095be2420bcad", - "readme": "/_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", - "tag": "package-2021-02", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-02 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" - } -} \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/accounts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/accounts.go deleted file mode 100644 index aab326ade4c3..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/accounts.go +++ /dev/null @@ -1,1444 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AccountsClient is the the Azure Storage Management API. -type AccountsClient struct { - BaseClient -} - -// NewAccountsClient creates an instance of the AccountsClient client. 
-func NewAccountsClient(subscriptionID string) AccountsClient { - return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { - return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckNameAvailability checks that the storage account name is valid and is not already in use. -// Parameters: -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client AccountsClient) CheckNameAvailability(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "CheckNameAvailability", err.Error()) - } - - req, err := client.CheckNameAvailabilityPreparer(ctx, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") - return - } - - resp, err := client.CheckNameAvailabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") - return - } - - result, err = client.CheckNameAvailabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") - return - } - - return -} - -// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
-func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), - autorest.WithJSON(accountName), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always -// closes the http.Response Body. -func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Create asynchronously creates a new storage account with the specified parameters. If an account is already created -// and a subsequent create request is issued with different properties, the account properties will be updated. If an -// account is already created and a subsequent create or update request is issued with the exact same set of -// properties, the request will succeed. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the parameters to provide for the created account. 
-func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (result AccountsCreateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.SasPolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.SasPolicy.SasExpirationPeriod", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AccountPropertiesCreateParameters.SasPolicy.ExpirationAction", Name: validation.Null, Rule: true, Chain: nil}, - }}, - {Target: "parameters.AccountPropertiesCreateParameters.KeyPolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.KeyPolicy.KeyExpirationPeriodInDays", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.NetBiosDomainName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.ForestName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainGUID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: 
"parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.DomainSid", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AccountPropertiesCreateParameters.AzureFilesIdentityBasedAuthentication.ActiveDirectoryProperties.AzureStorageSid", Name: validation.Null, Rule: true, Chain: nil}, - }}, - }}, - }}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") - return - } - - result, err = client.CreateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", result.Response(), "Failure sending request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a storage account in Microsoft Azure. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Failover failover request can be triggered for a storage account in case of availability issues. The failover occurs -// from the storage account's primary cluster to secondary cluster for RA-GRS accounts. The secondary cluster will -// become primary after failover. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client AccountsClient) Failover(ctx context.Context, resourceGroupName string, accountName string) (result AccountsFailoverFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Failover") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "Failover", err.Error()) - } - - req, err := client.FailoverPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", nil, "Failure preparing request") - return - } - - result, err = client.FailoverSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Failover", result.Response(), "Failure sending request") - return - } - - return -} - -// FailoverPreparer prepares the Failover request. 
-func (client AccountsClient) FailoverPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// FailoverSender sends the Failover request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) FailoverSender(req *http.Request) (future AccountsFailoverFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// FailoverResponder handles the response to the Failover request. The method always -// closes the http.Response Body. -func (client AccountsClient) FailoverResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name, -// location, and account status. The ListKeys operation should be used to retrieve storage keys. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// expand - may be used to expand the properties within account's properties. By default, data is not included -// when fetching properties. Currently we only support geoReplicationStats and blobRestoreStatus. 
-func (client AccountsClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (result Account, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.GetProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "GetProperties", err.Error()) - } - - req, err := client.GetPropertiesPreparer(ctx, resourceGroupName, accountName, expand) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetPropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") - return - } - - result, err = client.GetPropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") - return - } - - return -} - -// GetPropertiesPreparer prepares the GetProperties request. -func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, expand AccountExpand) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(string(expand)) > 0 { - queryParameters["$expand"] = autorest.Encode("query", expand) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetPropertiesSender sends the GetProperties request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetPropertiesResponder handles the response to the GetProperties request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use -// the ListKeys operation for this. -func (client AccountsClient) List(ctx context.Context) (result AccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") - defer func() { - sc := -1 - if result.alr.Response.Response != nil { - sc = result.alr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.alr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") - return - } - - result.alr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") - return - } - if result.alr.hasNextLink() && result.alr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. 
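Reviewer aside, not part of the removed file: a sketch of consuming the paged List API above via the ListComplete iterator defined a little further down in this hunk. listAllAccountNames is a hypothetical helper and assumes an already-authorized client.

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
)

// listAllAccountNames walks every page of the subscription-wide listing and
// prints each storage account name.
func listAllAccountNames(ctx context.Context, client storage.AccountsClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}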
-func (client AccountsClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) { - req, err := lastResults.accountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client AccountsClient) ListComplete(ctx context.Context) (result AccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx) - return -} - -// ListAccountSAS list SAS credentials of a storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the parameters to provide to list SAS credentials for the storage account. 
-func (client AccountsClient) ListAccountSAS(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListAccountSAS") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "ListAccountSAS", err.Error()) - } - - req, err := client.ListAccountSASPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request") - return - } - - resp, err := client.ListAccountSASSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request") - return - } - - result, err = client.ListAccountSASResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request") - return - } - - return -} - -// ListAccountSASPreparer prepares the ListAccountSAS request. -func (client AccountsClient) ListAccountSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListAccountSASSender sends the ListAccountSAS request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys -// are not returned; use the ListKeys operation for this. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.alr.Response.Response != nil { - sc = result.alr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "ListByResourceGroup", err.Error()) - } - - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.alr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.alr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.alr.hasNextLink() && result.alr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client AccountsClient) listByResourceGroupNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) { - req, err := lastResults.accountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. -func (client AccountsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) - return -} - -// ListKeys lists the access keys or Kerberos keys (if active directory enabled) for the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. 
-// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// expand - specifies type of the key to be listed. Possible value is kerb. -func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (result AccountListKeysResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListKeys") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error()) - } - - req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName, expand) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") - return - } - - resp, err := client.ListKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") - return - } - - result, err = client.ListKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") - return - } - - return -} - -// ListKeysPreparer prepares the ListKeys request. -func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string, expand ListKeyExpand) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(string(expand)) > 0 { - queryParameters["$expand"] = autorest.Encode("query", expand) - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListKeysSender sends the ListKeys request. The method will close the -// http.Response Body if it receives an error. 
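Reviewer aside, not part of the removed file: a sketch of the typical ListKeys call pattern for callers that need a storage credential. fetchFirstKey and both name arguments are illustrative; passing an empty expand value asks for the regular access keys only.

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
)

// fetchFirstKey returns the value of the first access key reported for the account.
func fetchFirstKey(ctx context.Context, client storage.AccountsClient, resourceGroup, accountName string) (string, error) {
	keys, err := client.ListKeys(ctx, resourceGroup, accountName, "")
	if err != nil {
		return "", err
	}
	if keys.Keys == nil || len(*keys.Keys) == 0 {
		return "", fmt.Errorf("no keys returned for storage account %q", accountName)
	}
	first := (*keys.Keys)[0]
	if first.Value == nil {
		return "", fmt.Errorf("key value missing for storage account %q", accountName)
	}
	return *first.Value, nil
}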
-func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListKeysResponder handles the response to the ListKeys request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListServiceSAS list service SAS credentials of a specific resource. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the parameters to provide to list service SAS credentials. -func (client AccountsClient) ListServiceSAS(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListServiceSAS") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Identifier", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "ListServiceSAS", err.Error()) - } - - req, err := client.ListServiceSASPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request") - return - } - - resp, err := client.ListServiceSASSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request") - return - } - - result, err = client.ListServiceSASResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request") - return - } - - return -} - -// ListServiceSASPreparer prepares the 
ListServiceSAS request. -func (client AccountsClient) ListServiceSASPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListServiceSASSender sends the ListServiceSAS request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always -// closes the http.Response Body. -func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateKey regenerates one of the access keys or Kerberos keys for the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// regenerateKey - specifies name of the key which should be regenerated -- key1, key2, kerb1, kerb2. 
-func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RegenerateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: regenerateKey, - Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "RegenerateKey", err.Error()) - } - - req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, regenerateKey) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") - return - } - - resp, err := client.RegenerateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") - return - } - - result, err = client.RegenerateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request") - return - } - - return -} - -// RegenerateKeyPreparer prepares the RegenerateKey request. -func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), - autorest.WithJSON(regenerateKey), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RegenerateKeySender sends the RegenerateKey request. The method will close the -// http.Response Body if it receives an error. 
-func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always -// closes the http.Response Body. -func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreBlobRanges restore blobs in the specified blob ranges -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the parameters to provide for restore blob ranges. -func (client AccountsClient) RestoreBlobRanges(ctx context.Context, resourceGroupName string, accountName string, parameters BlobRestoreParameters) (result AccountsRestoreBlobRangesFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RestoreBlobRanges") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.TimeToRestore", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.BlobRanges", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "RestoreBlobRanges", err.Error()) - } - - req, err := client.RestoreBlobRangesPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RestoreBlobRanges", nil, "Failure preparing request") - return - } - - result, err = client.RestoreBlobRangesSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RestoreBlobRanges", result.Response(), "Failure sending request") - return - } - - return -} - -// RestoreBlobRangesPreparer prepares the RestoreBlobRanges request. 
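Reviewer aside, not part of the removed file: a sketch of driving the long-running RestoreBlobRanges operation above and unwrapping its final status. restoreAndWait is hypothetical; the caller is assumed to supply a fully populated BlobRestoreParameters (TimeToRestore and BlobRanges are required by the validation shown above).

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
)

// restoreAndWait starts the restore, waits for the returned future to
// complete, then returns the final BlobRestoreStatus reported by the service.
func restoreAndWait(ctx context.Context, client storage.AccountsClient, resourceGroup, accountName string, params storage.BlobRestoreParameters) (storage.BlobRestoreStatus, error) {
	future, err := client.RestoreBlobRanges(ctx, resourceGroup, accountName, params)
	if err != nil {
		return storage.BlobRestoreStatus{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return storage.BlobRestoreStatus{}, err
	}
	return future.Result(client)
}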
-func (client AccountsClient) RestoreBlobRangesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobRestoreParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreBlobRangesSender sends the RestoreBlobRanges request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) RestoreBlobRangesSender(req *http.Request) (future AccountsRestoreBlobRangesFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// RestoreBlobRangesResponder handles the response to the RestoreBlobRanges request. The method always -// closes the http.Response Body. -func (client AccountsClient) RestoreBlobRangesResponder(resp *http.Response) (result BlobRestoreStatus, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RevokeUserDelegationKeys revoke user delegation keys. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client AccountsClient) RevokeUserDelegationKeys(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RevokeUserDelegationKeys") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "RevokeUserDelegationKeys", err.Error()) - } - - req, err := client.RevokeUserDelegationKeysPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", nil, "Failure preparing request") - return - } - - resp, err := client.RevokeUserDelegationKeysSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure sending request") - return - } - - result, err = client.RevokeUserDelegationKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RevokeUserDelegationKeys", resp, "Failure responding to request") - return - } - - return -} - -// RevokeUserDelegationKeysPreparer prepares the RevokeUserDelegationKeys request. -func (client AccountsClient) RevokeUserDelegationKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RevokeUserDelegationKeysSender sends the RevokeUserDelegationKeys request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) RevokeUserDelegationKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// RevokeUserDelegationKeysResponder handles the response to the RevokeUserDelegationKeys request. The method always -// closes the http.Response Body. 
-func (client AccountsClient) RevokeUserDelegationKeysResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account. -// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account; -// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value -// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This -// call does not change the storage keys for the account. If you want to change the storage account keys, use the -// regenerate keys operation. The location and name of the storage account cannot be changed after creation. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the parameters to provide for the updated account. -func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.AccountsClient", "Update", err.Error()) - } - - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
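Reviewer aside, not part of the removed file: the Update operation described above is a PATCH, so only the fields set on AccountUpdateParameters are changed. A sketch of a tags-only update, with updateTags as a hypothetical helper:

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
)

// updateTags changes only the tags on an existing account; every other
// property is left untouched because it is omitted from the PATCH body.
func updateTags(ctx context.Context, client storage.AccountsClient, resourceGroup, accountName string, tags map[string]*string) (storage.Account, error) {
	return client.Update(ctx, resourceGroup, accountName, storage.AccountUpdateParameters{Tags: tags})
}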
-func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobcontainers.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobcontainers.go deleted file mode 100644 index 000b65a67996..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobcontainers.go +++ /dev/null @@ -1,1437 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BlobContainersClient is the the Azure Storage Management API. -type BlobContainersClient struct { - BaseClient -} - -// NewBlobContainersClient creates an instance of the BlobContainersClient client. -func NewBlobContainersClient(subscriptionID string) BlobContainersClient { - return NewBlobContainersClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewBlobContainersClientWithBaseURI creates an instance of the BlobContainersClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
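Reviewer aside, not part of the removed file: a sketch of constructing the BlobContainersClient against a non-public cloud endpoint, as the comment above describes. newSovereignContainersClient and the endpoint are illustrative; the authorizer is assumed to come from the caller's existing credential setup.

import (
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
	"github.com/Azure/go-autorest/autorest"
)

// newSovereignContainersClient targets a sovereign-cloud management endpoint
// (for example Azure US Government) instead of the default public base URI.
func newSovereignContainersClient(baseURI, subscriptionID string, authorizer autorest.Authorizer) storage.BlobContainersClient {
	client := storage.NewBlobContainersClientWithBaseURI(baseURI, subscriptionID)
	client.Authorizer = authorizer
	return client
}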
-func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient { - return BlobContainersClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// ClearLegalHold clears legal hold tags. Clearing the same or non-existent tag results in an idempotent operation. -// ClearLegalHold clears out only the specified tags in the request. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// legalHold - the LegalHold property that will be clear from a blob container. -func (client BlobContainersClient) ClearLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ClearLegalHold") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: legalHold, - Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "ClearLegalHold", err.Error()) - } - - req, err := client.ClearLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", nil, "Failure preparing request") - return - } - - resp, err := client.ClearLegalHoldSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure sending request") - return - } - - result, err = client.ClearLegalHoldResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure responding to request") - return - } - 
- return -} - -// ClearLegalHoldPreparer prepares the ClearLegalHold request. -func (client BlobContainersClient) ClearLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - legalHold.HasLegalHold = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold", pathParameters), - autorest.WithJSON(legalHold), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ClearLegalHoldSender sends the ClearLegalHold request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) ClearLegalHoldSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ClearLegalHoldResponder handles the response to the ClearLegalHold request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) ClearLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Create creates a new container under the specified account as described by request body. The container resource -// includes metadata and properties for that container. It does not include a list of the blobs contained by the -// container. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// blobContainer - properties of the blob container to create. 
-func (client BlobContainersClient) Create(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Create") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure responding to request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client BlobContainersClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), - autorest.WithJSON(blobContainer), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. 
The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) CreateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) CreateResponder(resp *http.Response) (result BlobContainer, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateImmutabilityPolicy creates or updates an unlocked immutability policy. ETag in If-Match is honored if -// given but not required for this operation. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// parameters - the ImmutabilityPolicy Properties that will be created or updated to a blob container. -// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used -// to apply the operation only if the immutability policy already exists. If omitted, this operation will -// always be applied. 
-func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (result ImmutabilityPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.CreateOrUpdateImmutabilityPolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", err.Error()) - } - - req, err := client.CreateOrUpdateImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, parameters, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateImmutabilityPolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateImmutabilityPolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdateImmutabilityPolicyPreparer prepares the CreateOrUpdateImmutabilityPolicy request. 
-func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "immutabilityPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateImmutabilityPolicySender sends the CreateOrUpdateImmutabilityPolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateImmutabilityPolicyResponder handles the response to the CreateOrUpdateImmutabilityPolicy request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes specified container under its account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. 
-func (client BlobContainersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, containerName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client BlobContainersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client BlobContainersClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// DeleteImmutabilityPolicy aborts an unlocked immutability policy. The response of delete has -// immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this operation. Deleting a locked -// immutability policy is not allowed, the only way is to delete the container after deleting all expired blobs inside -// the policy locked container. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used -// to apply the operation only if the immutability policy already exists. If omitted, this operation will -// always be applied. 
-func (client BlobContainersClient) DeleteImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.DeleteImmutabilityPolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "DeleteImmutabilityPolicy", err.Error()) - } - - req, err := client.DeleteImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteImmutabilityPolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure sending request") - return - } - - result, err = client.DeleteImmutabilityPolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure responding to request") - return - } - - return -} - -// DeleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request. 
-func (client BlobContainersClient) DeleteImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "immutabilityPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters), - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteImmutabilityPolicySender sends the DeleteImmutabilityPolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) DeleteImmutabilityPolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) DeleteImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ExtendImmutabilityPolicy extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only -// action allowed on a Locked policy will be this action. ETag in If-Match is required for this operation. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used -// to apply the operation only if the immutability policy already exists. If omitted, this operation will -// always be applied. -// parameters - the ImmutabilityPolicy Properties that will be extended for a blob container. 
-func (client BlobContainersClient) ExtendImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (result ImmutabilityPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ExtendImmutabilityPolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "ExtendImmutabilityPolicy", err.Error()) - } - - req, err := client.ExtendImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", nil, "Failure preparing request") - return - } - - resp, err := client.ExtendImmutabilityPolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure sending request") - return - } - - result, err = client.ExtendImmutabilityPolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure responding to request") - return - } - - return -} - -// ExtendImmutabilityPolicyPreparer prepares the ExtendImmutabilityPolicy request. 
-func (client BlobContainersClient) ExtendImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend", pathParameters), - autorest.WithQueryParameters(queryParameters), - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ExtendImmutabilityPolicySender sends the ExtendImmutabilityPolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) ExtendImmutabilityPolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ExtendImmutabilityPolicyResponder handles the response to the ExtendImmutabilityPolicy request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) ExtendImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Get gets properties of a specified container. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. 
-func (client BlobContainersClient) Get(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainer, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, containerName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client BlobContainersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client BlobContainersClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) GetResponder(resp *http.Response) (result BlobContainer, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetImmutabilityPolicy gets the existing immutability policy along with the corresponding ETag in response headers -// and body. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used -// to apply the operation only if the immutability policy already exists. If omitted, this operation will -// always be applied. -func (client BlobContainersClient) GetImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.GetImmutabilityPolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "GetImmutabilityPolicy", err.Error()) - } - - req, err := client.GetImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", nil, "Failure preparing request") - return - } - - resp, err := 
client.GetImmutabilityPolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure sending request") - return - } - - result, err = client.GetImmutabilityPolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure responding to request") - return - } - - return -} - -// GetImmutabilityPolicyPreparer prepares the GetImmutabilityPolicy request. -func (client BlobContainersClient) GetImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "immutabilityPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetImmutabilityPolicySender sends the GetImmutabilityPolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) GetImmutabilityPolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetImmutabilityPolicyResponder handles the response to the GetImmutabilityPolicy request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) GetImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Lease the Lease Container operation establishes and manages a lock on a container for delete operations. The lock -// duration can be 15 to 60 seconds, or can be infinite. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. 
-// parameters - lease Container request body. -func (client BlobContainersClient) Lease(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (result LeaseContainerResponse, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Lease") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "Lease", err.Error()) - } - - req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, containerName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", nil, "Failure preparing request") - return - } - - resp, err := client.LeaseSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure sending request") - return - } - - result, err = client.LeaseResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure responding to request") - return - } - - return -} - -// LeasePreparer prepares the Lease request. 
-func (client BlobContainersClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if parameters != nil { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(parameters)) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// LeaseSender sends the Lease request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) LeaseSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// LeaseResponder handles the response to the Lease request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) LeaseResponder(resp *http.Response) (result LeaseContainerResponse, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists all containers and does not support a prefix like data plane. Also SRP today does not return continuation -// token. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// maxpagesize - optional. Specified maximum number of containers that can be included in the list. -// filter - optional. When specified, only container names starting with the filter will be listed. -// include - optional, used to include the properties for soft deleted blob containers. 
-func (client BlobContainersClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (result ListContainerItemsPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List") - defer func() { - sc := -1 - if result.lci.Response.Response != nil { - sc = result.lci.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter, include) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.lci.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure sending request") - return - } - - result.lci, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure responding to request") - return - } - if result.lci.hasNextLink() && result.lci.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client BlobContainersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(maxpagesize) > 0 { - queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if len(string(include)) > 0 { - queryParameters["$include"] = autorest.Encode("query", include) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) ListResponder(resp *http.Response) (result ListContainerItems, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client BlobContainersClient) listNextResults(ctx context.Context, lastResults ListContainerItems) (result ListContainerItems, err error) { - req, err := lastResults.listContainerItemsPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client BlobContainersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, include ListContainersInclude) (result ListContainerItemsIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter, include) - return -} - -// LockImmutabilityPolicy sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is -// ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used -// to apply the operation only if the immutability policy already exists. If omitted, this operation will -// always be applied. -func (client BlobContainersClient) LockImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.LockImmutabilityPolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "LockImmutabilityPolicy", err.Error()) - } - - req, err := client.LockImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", nil, 
"Failure preparing request") - return - } - - resp, err := client.LockImmutabilityPolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure sending request") - return - } - - result, err = client.LockImmutabilityPolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure responding to request") - return - } - - return -} - -// LockImmutabilityPolicyPreparer prepares the LockImmutabilityPolicy request. -func (client BlobContainersClient) LockImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock", pathParameters), - autorest.WithQueryParameters(queryParameters), - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// LockImmutabilityPolicySender sends the LockImmutabilityPolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) LockImmutabilityPolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// LockImmutabilityPolicyResponder handles the response to the LockImmutabilityPolicy request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) LockImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetLegalHold sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold follows an -// append pattern and does not clear out the existing tags that are not specified in the request. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. 
-// legalHold - the LegalHold property that will be set to a blob container. -func (client BlobContainersClient) SetLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.SetLegalHold") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: legalHold, - Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "SetLegalHold", err.Error()) - } - - req, err := client.SetLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", nil, "Failure preparing request") - return - } - - resp, err := client.SetLegalHoldSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure sending request") - return - } - - result, err = client.SetLegalHoldResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure responding to request") - return - } - - return -} - -// SetLegalHoldPreparer prepares the SetLegalHold request. 
-func (client BlobContainersClient) SetLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - legalHold.HasLegalHold = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold", pathParameters), - autorest.WithJSON(legalHold), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetLegalHoldSender sends the SetLegalHold request. The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) SetLegalHoldSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// SetLegalHoldResponder handles the response to the SetLegalHold request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) SetLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update updates container properties as specified in request body. Properties not mentioned in the request will be -// unchanged. Update fails if the specified container doesn't already exist. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// containerName - the name of the blob container within the specified storage account. Blob container names -// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every -// dash (-) character must be immediately preceded and followed by a letter or number. -// blobContainer - properties to update for the blob container. 
-func (client BlobContainersClient) Update(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: containerName, - Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobContainersClient", "Update", err.Error()) - } - - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client BlobContainersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "containerName": autorest.Encode("path", containerName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), - autorest.WithJSON(blobContainer), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. 
The method will close the -// http.Response Body if it receives an error. -func (client BlobContainersClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client BlobContainersClient) UpdateResponder(resp *http.Response) (result BlobContainer, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobinventorypolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobinventorypolicies.go deleted file mode 100644 index 1b331244726d..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobinventorypolicies.go +++ /dev/null @@ -1,411 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BlobInventoryPoliciesClient is the the Azure Storage Management API. -type BlobInventoryPoliciesClient struct { - BaseClient -} - -// NewBlobInventoryPoliciesClient creates an instance of the BlobInventoryPoliciesClient client. -func NewBlobInventoryPoliciesClient(subscriptionID string) BlobInventoryPoliciesClient { - return NewBlobInventoryPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewBlobInventoryPoliciesClientWithBaseURI creates an instance of the BlobInventoryPoliciesClient client using a -// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, -// Azure stack). -func NewBlobInventoryPoliciesClientWithBaseURI(baseURI string, subscriptionID string) BlobInventoryPoliciesClient { - return BlobInventoryPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sets the blob inventory policy to the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// properties - the blob inventory policy set to a storage account. 
-func (client BlobInventoryPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties BlobInventoryPolicy) (result BlobInventoryPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: properties, - Constraints: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties.Policy", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "properties.BlobInventoryPolicyProperties.Policy.Enabled", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "properties.BlobInventoryPolicyProperties.Policy.Destination", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "properties.BlobInventoryPolicyProperties.Policy.Type", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "properties.BlobInventoryPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}, - }}, - }}}}}); err != nil { - return result, validation.NewError("storage.BlobInventoryPoliciesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client BlobInventoryPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties BlobInventoryPolicy) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "blobInventoryPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters), - autorest.WithJSON(properties), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client BlobInventoryPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client BlobInventoryPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result BlobInventoryPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the blob inventory policy associated with the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client BlobInventoryPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobInventoryPoliciesClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client BlobInventoryPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "blobInventoryPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client BlobInventoryPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client BlobInventoryPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the blob inventory policy associated with the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client BlobInventoryPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result BlobInventoryPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobInventoryPoliciesClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client BlobInventoryPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "blobInventoryPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client BlobInventoryPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client BlobInventoryPoliciesClient) GetResponder(resp *http.Response) (result BlobInventoryPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets the blob inventory policy associated with the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client BlobInventoryPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListBlobInventoryPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobInventoryPoliciesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobInventoryPoliciesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobInventoryPoliciesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client BlobInventoryPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client BlobInventoryPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client BlobInventoryPoliciesClient) ListResponder(resp *http.Response) (result ListBlobInventoryPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobservices.go deleted file mode 100644 index 3f3af97acc86..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/blobservices.go +++ /dev/null @@ -1,344 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BlobServicesClient is the the Azure Storage Management API. -type BlobServicesClient struct { - BaseClient -} - -// NewBlobServicesClient creates an instance of the BlobServicesClient client. -func NewBlobServicesClient(subscriptionID string) BlobServicesClient { - return NewBlobServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewBlobServicesClientWithBaseURI creates an instance of the BlobServicesClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient { - return BlobServicesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// GetServiceProperties gets the properties of a storage account’s Blob service, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client BlobServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.GetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobServicesClient", "GetServiceProperties", err.Error()) - } - - req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.GetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "GetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// GetServicePropertiesPreparer prepares the GetServiceProperties request. -func (client BlobServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "BlobServicesName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the -// http.Response Body if it receives an error. 
-func (client BlobServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always -// closes the http.Response Body. -func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list blob services of storage account. It returns a collection of one object named default. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.BlobServicesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the properties of a storage account’s Blob service, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. 
-func (client BlobServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (result BlobServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.SetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, - {Target: "parameters.BlobServicePropertiesProperties.DeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}, - {Target: "parameters.BlobServicePropertiesProperties.ChangeFeed", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.InclusiveMaximum, Rule: int64(146000), Chain: nil}, - {Target: "parameters.BlobServicePropertiesProperties.ChangeFeed.RetentionInDays", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}, - {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Enabled", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, - {Target: "parameters.BlobServicePropertiesProperties.RestorePolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}, - {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, - {Target: "parameters.BlobServicePropertiesProperties.ContainerDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}, - {Target: "parameters.BlobServicePropertiesProperties.LastAccessTimeTrackingPolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.BlobServicePropertiesProperties.LastAccessTimeTrackingPolicy.Enable", Name: validation.Null, Rule: true, Chain: nil}}}, - }}}}}); err != nil { - return result, validation.NewError("storage.BlobServicesClient", "SetServiceProperties", err.Error()) - } - - req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.SetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.SetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "SetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// SetServicePropertiesPreparer prepares the SetServiceProperties request. -func (client BlobServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters BlobServiceProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "BlobServicesName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - parameters.Sku = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the -// http.Response Body if it receives an error. -func (client BlobServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always -// closes the http.Response Body. 
-func (client BlobServicesClient) SetServicePropertiesResponder(resp *http.Response) (result BlobServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go deleted file mode 100644 index 71aa87b9192e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/client.go +++ /dev/null @@ -1,43 +0,0 @@ -// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details. -// -// Package storage implements the Azure ARM Storage service API version 2021-02-01. -// -// The Azure Storage Management API. -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Storage - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Storage. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with -// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/deletedaccounts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/deletedaccounts.go deleted file mode 100644 index 109ae05dc96b..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/deletedaccounts.go +++ /dev/null @@ -1,236 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. 
-// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// DeletedAccountsClient is the the Azure Storage Management API. -type DeletedAccountsClient struct { - BaseClient -} - -// NewDeletedAccountsClient creates an instance of the DeletedAccountsClient client. -func NewDeletedAccountsClient(subscriptionID string) DeletedAccountsClient { - return NewDeletedAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDeletedAccountsClientWithBaseURI creates an instance of the DeletedAccountsClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewDeletedAccountsClientWithBaseURI(baseURI string, subscriptionID string) DeletedAccountsClient { - return DeletedAccountsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get get properties of specified deleted account resource. -// Parameters: -// deletedAccountName - name of the deleted storage account. -// location - the location of the deleted storage account. -func (client DeletedAccountsClient) Get(ctx context.Context, deletedAccountName string, location string) (result DeletedAccount, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: deletedAccountName, - Constraints: []validation.Constraint{{Target: "deletedAccountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "deletedAccountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.DeletedAccountsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, deletedAccountName, location) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client DeletedAccountsClient) GetPreparer(ctx context.Context, deletedAccountName string, location string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deletedAccountName": autorest.Encode("path", deletedAccountName), - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/deletedAccounts/{deletedAccountName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client DeletedAccountsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client DeletedAccountsClient) GetResponder(resp *http.Response) (result DeletedAccount, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists deleted accounts under the subscription. -func (client DeletedAccountsClient) List(ctx context.Context) (result DeletedAccountListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.List") - defer func() { - sc := -1 - if result.dalr.Response.Response != nil { - sc = result.dalr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.DeletedAccountsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.dalr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", resp, "Failure sending request") - return - } - - result.dalr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "List", resp, "Failure responding to request") - return - } - if result.dalr.hasNextLink() && result.dalr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client DeletedAccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/deletedAccounts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client DeletedAccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client DeletedAccountsClient) ListResponder(resp *http.Response) (result DeletedAccountListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client DeletedAccountsClient) listNextResults(ctx context.Context, lastResults DeletedAccountListResult) (result DeletedAccountListResult, err error) { - req, err := lastResults.deletedAccountListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.DeletedAccountsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client DeletedAccountsClient) ListComplete(ctx context.Context) (result DeletedAccountListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx) - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/encryptionscopes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/encryptionscopes.go deleted file mode 100644 index 3235434fc63e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/encryptionscopes.go +++ /dev/null @@ -1,469 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// EncryptionScopesClient is the the Azure Storage Management API. -type EncryptionScopesClient struct { - BaseClient -} - -// NewEncryptionScopesClient creates an instance of the EncryptionScopesClient client. -func NewEncryptionScopesClient(subscriptionID string) EncryptionScopesClient { - return NewEncryptionScopesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewEncryptionScopesClientWithBaseURI creates an instance of the EncryptionScopesClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewEncryptionScopesClientWithBaseURI(baseURI string, subscriptionID string) EncryptionScopesClient { - return EncryptionScopesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get returns the properties for the specified encryption scope. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption -// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) -// only. Every dash (-) character must be immediately preceded and followed by a letter or number. 
-func (client EncryptionScopesClient) Get(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (result EncryptionScope, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: encryptionScopeName, - Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.EncryptionScopesClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, encryptionScopeName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client EncryptionScopesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "encryptionScopeName": autorest.Encode("path", encryptionScopeName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client EncryptionScopesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client EncryptionScopesClient) GetResponder(resp *http.Response) (result EncryptionScope, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists all the encryption scopes available under the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client EncryptionScopesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result EncryptionScopeListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.List") - defer func() { - sc := -1 - if result.eslr.Response.Response != nil { - sc = result.eslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.EncryptionScopesClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.eslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", resp, "Failure sending request") - return - } - - result.eslr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "List", resp, "Failure responding to request") - return - } - if result.eslr.hasNextLink() && result.eslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client EncryptionScopesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client EncryptionScopesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client EncryptionScopesClient) ListResponder(resp *http.Response) (result EncryptionScopeListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client EncryptionScopesClient) listNextResults(ctx context.Context, lastResults EncryptionScopeListResult) (result EncryptionScopeListResult, err error) { - req, err := lastResults.encryptionScopeListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client EncryptionScopesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result EncryptionScopeListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, accountName) - return -} - -// Patch update encryption scope properties as specified in the request body. Update fails if the specified encryption -// scope does not already exist. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. 
-// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption -// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) -// only. Every dash (-) character must be immediately preceded and followed by a letter or number. -// encryptionScope - encryption scope properties to be used for the update. -func (client EncryptionScopesClient) Patch(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (result EncryptionScope, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Patch") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: encryptionScopeName, - Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.EncryptionScopesClient", "Patch", err.Error()) - } - - req, err := client.PatchPreparer(ctx, resourceGroupName, accountName, encryptionScopeName, encryptionScope) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", nil, "Failure preparing request") - return - } - - resp, err := client.PatchSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", resp, "Failure sending request") - return - } - - result, err = client.PatchResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Patch", resp, "Failure responding to request") - return - } - - return -} - -// PatchPreparer prepares the Patch request. 
-func (client EncryptionScopesClient) PatchPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "encryptionScopeName": autorest.Encode("path", encryptionScopeName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters), - autorest.WithJSON(encryptionScope), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PatchSender sends the Patch request. The method will close the -// http.Response Body if it receives an error. -func (client EncryptionScopesClient) PatchSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// PatchResponder handles the response to the Patch request. The method always -// closes the http.Response Body. -func (client EncryptionScopesClient) PatchResponder(resp *http.Response) (result EncryptionScope, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Put synchronously creates or updates an encryption scope under the specified storage account. If an encryption scope -// is already created and a subsequent request is issued with different properties, the encryption scope properties -// will be updated per the specified request. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// encryptionScopeName - the name of the encryption scope within the specified storage account. Encryption -// scope names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) -// only. Every dash (-) character must be immediately preceded and followed by a letter or number. -// encryptionScope - encryption scope properties to be used for the create or update. 
-func (client EncryptionScopesClient) Put(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (result EncryptionScope, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopesClient.Put") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: encryptionScopeName, - Constraints: []validation.Constraint{{Target: "encryptionScopeName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "encryptionScopeName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.EncryptionScopesClient", "Put", err.Error()) - } - - req, err := client.PutPreparer(ctx, resourceGroupName, accountName, encryptionScopeName, encryptionScope) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", nil, "Failure preparing request") - return - } - - resp, err := client.PutSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", resp, "Failure sending request") - return - } - - result, err = client.PutResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.EncryptionScopesClient", "Put", resp, "Failure responding to request") - return - } - - return -} - -// PutPreparer prepares the Put request. -func (client EncryptionScopesClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string, encryptionScope EncryptionScope) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "encryptionScopeName": autorest.Encode("path", encryptionScopeName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}", pathParameters), - autorest.WithJSON(encryptionScope), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PutSender sends the Put request. 
The method will close the -// http.Response Body if it receives an error. -func (client EncryptionScopesClient) PutSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// PutResponder handles the response to the Put request. The method always -// closes the http.Response Body. -func (client EncryptionScopesClient) PutResponder(resp *http.Response) (result EncryptionScope, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/enums.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/enums.go deleted file mode 100644 index 65368dba60af..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/enums.go +++ /dev/null @@ -1,863 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// AccessTier enumerates the values for access tier. -type AccessTier string - -const ( - // AccessTierCool ... - AccessTierCool AccessTier = "Cool" - // AccessTierHot ... - AccessTierHot AccessTier = "Hot" -) - -// PossibleAccessTierValues returns an array of possible values for the AccessTier const type. -func PossibleAccessTierValues() []AccessTier { - return []AccessTier{AccessTierCool, AccessTierHot} -} - -// AccountExpand enumerates the values for account expand. -type AccountExpand string - -const ( - // AccountExpandBlobRestoreStatus ... - AccountExpandBlobRestoreStatus AccountExpand = "blobRestoreStatus" - // AccountExpandGeoReplicationStats ... - AccountExpandGeoReplicationStats AccountExpand = "geoReplicationStats" -) - -// PossibleAccountExpandValues returns an array of possible values for the AccountExpand const type. -func PossibleAccountExpandValues() []AccountExpand { - return []AccountExpand{AccountExpandBlobRestoreStatus, AccountExpandGeoReplicationStats} -} - -// AccountStatus enumerates the values for account status. -type AccountStatus string - -const ( - // AccountStatusAvailable ... - AccountStatusAvailable AccountStatus = "available" - // AccountStatusUnavailable ... - AccountStatusUnavailable AccountStatus = "unavailable" -) - -// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type. -func PossibleAccountStatusValues() []AccountStatus { - return []AccountStatus{AccountStatusAvailable, AccountStatusUnavailable} -} - -// Action enumerates the values for action. -type Action string - -const ( - // ActionAllow ... - ActionAllow Action = "Allow" -) - -// PossibleActionValues returns an array of possible values for the Action const type. -func PossibleActionValues() []Action { - return []Action{ActionAllow} -} - -// Action1 enumerates the values for action 1. -type Action1 string - -const ( - // Action1Acquire ... - Action1Acquire Action1 = "Acquire" - // Action1Break ... - Action1Break Action1 = "Break" - // Action1Change ... 
- Action1Change Action1 = "Change" - // Action1Release ... - Action1Release Action1 = "Release" - // Action1Renew ... - Action1Renew Action1 = "Renew" -) - -// PossibleAction1Values returns an array of possible values for the Action1 const type. -func PossibleAction1Values() []Action1 { - return []Action1{Action1Acquire, Action1Break, Action1Change, Action1Release, Action1Renew} -} - -// BlobRestoreProgressStatus enumerates the values for blob restore progress status. -type BlobRestoreProgressStatus string - -const ( - // BlobRestoreProgressStatusComplete ... - BlobRestoreProgressStatusComplete BlobRestoreProgressStatus = "Complete" - // BlobRestoreProgressStatusFailed ... - BlobRestoreProgressStatusFailed BlobRestoreProgressStatus = "Failed" - // BlobRestoreProgressStatusInProgress ... - BlobRestoreProgressStatusInProgress BlobRestoreProgressStatus = "InProgress" -) - -// PossibleBlobRestoreProgressStatusValues returns an array of possible values for the BlobRestoreProgressStatus const type. -func PossibleBlobRestoreProgressStatusValues() []BlobRestoreProgressStatus { - return []BlobRestoreProgressStatus{BlobRestoreProgressStatusComplete, BlobRestoreProgressStatusFailed, BlobRestoreProgressStatusInProgress} -} - -// Bypass enumerates the values for bypass. -type Bypass string - -const ( - // BypassAzureServices ... - BypassAzureServices Bypass = "AzureServices" - // BypassLogging ... - BypassLogging Bypass = "Logging" - // BypassMetrics ... - BypassMetrics Bypass = "Metrics" - // BypassNone ... - BypassNone Bypass = "None" -) - -// PossibleBypassValues returns an array of possible values for the Bypass const type. -func PossibleBypassValues() []Bypass { - return []Bypass{BypassAzureServices, BypassLogging, BypassMetrics, BypassNone} -} - -// CreatedByType enumerates the values for created by type. -type CreatedByType string - -const ( - // CreatedByTypeApplication ... - CreatedByTypeApplication CreatedByType = "Application" - // CreatedByTypeKey ... - CreatedByTypeKey CreatedByType = "Key" - // CreatedByTypeManagedIdentity ... - CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" - // CreatedByTypeUser ... - CreatedByTypeUser CreatedByType = "User" -) - -// PossibleCreatedByTypeValues returns an array of possible values for the CreatedByType const type. -func PossibleCreatedByTypeValues() []CreatedByType { - return []CreatedByType{CreatedByTypeApplication, CreatedByTypeKey, CreatedByTypeManagedIdentity, CreatedByTypeUser} -} - -// DefaultAction enumerates the values for default action. -type DefaultAction string - -const ( - // DefaultActionAllow ... - DefaultActionAllow DefaultAction = "Allow" - // DefaultActionDeny ... - DefaultActionDeny DefaultAction = "Deny" -) - -// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type. -func PossibleDefaultActionValues() []DefaultAction { - return []DefaultAction{DefaultActionAllow, DefaultActionDeny} -} - -// DirectoryServiceOptions enumerates the values for directory service options. -type DirectoryServiceOptions string - -const ( - // DirectoryServiceOptionsAADDS ... - DirectoryServiceOptionsAADDS DirectoryServiceOptions = "AADDS" - // DirectoryServiceOptionsAD ... - DirectoryServiceOptionsAD DirectoryServiceOptions = "AD" - // DirectoryServiceOptionsNone ... - DirectoryServiceOptionsNone DirectoryServiceOptions = "None" -) - -// PossibleDirectoryServiceOptionsValues returns an array of possible values for the DirectoryServiceOptions const type. 
-func PossibleDirectoryServiceOptionsValues() []DirectoryServiceOptions { - return []DirectoryServiceOptions{DirectoryServiceOptionsAADDS, DirectoryServiceOptionsAD, DirectoryServiceOptionsNone} -} - -// EnabledProtocols enumerates the values for enabled protocols. -type EnabledProtocols string - -const ( - // EnabledProtocolsNFS ... - EnabledProtocolsNFS EnabledProtocols = "NFS" - // EnabledProtocolsSMB ... - EnabledProtocolsSMB EnabledProtocols = "SMB" -) - -// PossibleEnabledProtocolsValues returns an array of possible values for the EnabledProtocols const type. -func PossibleEnabledProtocolsValues() []EnabledProtocols { - return []EnabledProtocols{EnabledProtocolsNFS, EnabledProtocolsSMB} -} - -// EncryptionScopeSource enumerates the values for encryption scope source. -type EncryptionScopeSource string - -const ( - // EncryptionScopeSourceMicrosoftKeyVault ... - EncryptionScopeSourceMicrosoftKeyVault EncryptionScopeSource = "Microsoft.KeyVault" - // EncryptionScopeSourceMicrosoftStorage ... - EncryptionScopeSourceMicrosoftStorage EncryptionScopeSource = "Microsoft.Storage" -) - -// PossibleEncryptionScopeSourceValues returns an array of possible values for the EncryptionScopeSource const type. -func PossibleEncryptionScopeSourceValues() []EncryptionScopeSource { - return []EncryptionScopeSource{EncryptionScopeSourceMicrosoftKeyVault, EncryptionScopeSourceMicrosoftStorage} -} - -// EncryptionScopeState enumerates the values for encryption scope state. -type EncryptionScopeState string - -const ( - // EncryptionScopeStateDisabled ... - EncryptionScopeStateDisabled EncryptionScopeState = "Disabled" - // EncryptionScopeStateEnabled ... - EncryptionScopeStateEnabled EncryptionScopeState = "Enabled" -) - -// PossibleEncryptionScopeStateValues returns an array of possible values for the EncryptionScopeState const type. -func PossibleEncryptionScopeStateValues() []EncryptionScopeState { - return []EncryptionScopeState{EncryptionScopeStateDisabled, EncryptionScopeStateEnabled} -} - -// ExtendedLocationTypes enumerates the values for extended location types. -type ExtendedLocationTypes string - -const ( - // ExtendedLocationTypesEdgeZone ... - ExtendedLocationTypesEdgeZone ExtendedLocationTypes = "EdgeZone" -) - -// PossibleExtendedLocationTypesValues returns an array of possible values for the ExtendedLocationTypes const type. -func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes { - return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone} -} - -// GeoReplicationStatus enumerates the values for geo replication status. -type GeoReplicationStatus string - -const ( - // GeoReplicationStatusBootstrap ... - GeoReplicationStatusBootstrap GeoReplicationStatus = "Bootstrap" - // GeoReplicationStatusLive ... - GeoReplicationStatusLive GeoReplicationStatus = "Live" - // GeoReplicationStatusUnavailable ... - GeoReplicationStatusUnavailable GeoReplicationStatus = "Unavailable" -) - -// PossibleGeoReplicationStatusValues returns an array of possible values for the GeoReplicationStatus const type. -func PossibleGeoReplicationStatusValues() []GeoReplicationStatus { - return []GeoReplicationStatus{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusUnavailable} -} - -// GetShareExpand enumerates the values for get share expand. -type GetShareExpand string - -const ( - // GetShareExpandStats ... - GetShareExpandStats GetShareExpand = "stats" -) - -// PossibleGetShareExpandValues returns an array of possible values for the GetShareExpand const type. 
-func PossibleGetShareExpandValues() []GetShareExpand { - return []GetShareExpand{GetShareExpandStats} -} - -// HTTPProtocol enumerates the values for http protocol. -type HTTPProtocol string - -const ( - // HTTPProtocolHTTPS ... - HTTPProtocolHTTPS HTTPProtocol = "https" - // HTTPProtocolHttpshttp ... - HTTPProtocolHttpshttp HTTPProtocol = "https,http" -) - -// PossibleHTTPProtocolValues returns an array of possible values for the HTTPProtocol const type. -func PossibleHTTPProtocolValues() []HTTPProtocol { - return []HTTPProtocol{HTTPProtocolHTTPS, HTTPProtocolHttpshttp} -} - -// IdentityType enumerates the values for identity type. -type IdentityType string - -const ( - // IdentityTypeNone ... - IdentityTypeNone IdentityType = "None" - // IdentityTypeSystemAssigned ... - IdentityTypeSystemAssigned IdentityType = "SystemAssigned" - // IdentityTypeSystemAssignedUserAssigned ... - IdentityTypeSystemAssignedUserAssigned IdentityType = "SystemAssigned,UserAssigned" - // IdentityTypeUserAssigned ... - IdentityTypeUserAssigned IdentityType = "UserAssigned" -) - -// PossibleIdentityTypeValues returns an array of possible values for the IdentityType const type. -func PossibleIdentityTypeValues() []IdentityType { - return []IdentityType{IdentityTypeNone, IdentityTypeSystemAssigned, IdentityTypeSystemAssignedUserAssigned, IdentityTypeUserAssigned} -} - -// ImmutabilityPolicyState enumerates the values for immutability policy state. -type ImmutabilityPolicyState string - -const ( - // ImmutabilityPolicyStateLocked ... - ImmutabilityPolicyStateLocked ImmutabilityPolicyState = "Locked" - // ImmutabilityPolicyStateUnlocked ... - ImmutabilityPolicyStateUnlocked ImmutabilityPolicyState = "Unlocked" -) - -// PossibleImmutabilityPolicyStateValues returns an array of possible values for the ImmutabilityPolicyState const type. -func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState { - return []ImmutabilityPolicyState{ImmutabilityPolicyStateLocked, ImmutabilityPolicyStateUnlocked} -} - -// ImmutabilityPolicyUpdateType enumerates the values for immutability policy update type. -type ImmutabilityPolicyUpdateType string - -const ( - // ImmutabilityPolicyUpdateTypeExtend ... - ImmutabilityPolicyUpdateTypeExtend ImmutabilityPolicyUpdateType = "extend" - // ImmutabilityPolicyUpdateTypeLock ... - ImmutabilityPolicyUpdateTypeLock ImmutabilityPolicyUpdateType = "lock" - // ImmutabilityPolicyUpdateTypePut ... - ImmutabilityPolicyUpdateTypePut ImmutabilityPolicyUpdateType = "put" -) - -// PossibleImmutabilityPolicyUpdateTypeValues returns an array of possible values for the ImmutabilityPolicyUpdateType const type. -func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType { - return []ImmutabilityPolicyUpdateType{ImmutabilityPolicyUpdateTypeExtend, ImmutabilityPolicyUpdateTypeLock, ImmutabilityPolicyUpdateTypePut} -} - -// KeyPermission enumerates the values for key permission. -type KeyPermission string - -const ( - // KeyPermissionFull ... - KeyPermissionFull KeyPermission = "Full" - // KeyPermissionRead ... - KeyPermissionRead KeyPermission = "Read" -) - -// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type. -func PossibleKeyPermissionValues() []KeyPermission { - return []KeyPermission{KeyPermissionFull, KeyPermissionRead} -} - -// KeySource enumerates the values for key source. -type KeySource string - -const ( - // KeySourceMicrosoftKeyvault ... 
- KeySourceMicrosoftKeyvault KeySource = "Microsoft.Keyvault" - // KeySourceMicrosoftStorage ... - KeySourceMicrosoftStorage KeySource = "Microsoft.Storage" -) - -// PossibleKeySourceValues returns an array of possible values for the KeySource const type. -func PossibleKeySourceValues() []KeySource { - return []KeySource{KeySourceMicrosoftKeyvault, KeySourceMicrosoftStorage} -} - -// KeyType enumerates the values for key type. -type KeyType string - -const ( - // KeyTypeAccount ... - KeyTypeAccount KeyType = "Account" - // KeyTypeService ... - KeyTypeService KeyType = "Service" -) - -// PossibleKeyTypeValues returns an array of possible values for the KeyType const type. -func PossibleKeyTypeValues() []KeyType { - return []KeyType{KeyTypeAccount, KeyTypeService} -} - -// Kind enumerates the values for kind. -type Kind string - -const ( - // KindBlobStorage ... - KindBlobStorage Kind = "BlobStorage" - // KindBlockBlobStorage ... - KindBlockBlobStorage Kind = "BlockBlobStorage" - // KindFileStorage ... - KindFileStorage Kind = "FileStorage" - // KindStorage ... - KindStorage Kind = "Storage" - // KindStorageV2 ... - KindStorageV2 Kind = "StorageV2" -) - -// PossibleKindValues returns an array of possible values for the Kind const type. -func PossibleKindValues() []Kind { - return []Kind{KindBlobStorage, KindBlockBlobStorage, KindFileStorage, KindStorage, KindStorageV2} -} - -// LargeFileSharesState enumerates the values for large file shares state. -type LargeFileSharesState string - -const ( - // LargeFileSharesStateDisabled ... - LargeFileSharesStateDisabled LargeFileSharesState = "Disabled" - // LargeFileSharesStateEnabled ... - LargeFileSharesStateEnabled LargeFileSharesState = "Enabled" -) - -// PossibleLargeFileSharesStateValues returns an array of possible values for the LargeFileSharesState const type. -func PossibleLargeFileSharesStateValues() []LargeFileSharesState { - return []LargeFileSharesState{LargeFileSharesStateDisabled, LargeFileSharesStateEnabled} -} - -// LeaseDuration enumerates the values for lease duration. -type LeaseDuration string - -const ( - // LeaseDurationFixed ... - LeaseDurationFixed LeaseDuration = "Fixed" - // LeaseDurationInfinite ... - LeaseDurationInfinite LeaseDuration = "Infinite" -) - -// PossibleLeaseDurationValues returns an array of possible values for the LeaseDuration const type. -func PossibleLeaseDurationValues() []LeaseDuration { - return []LeaseDuration{LeaseDurationFixed, LeaseDurationInfinite} -} - -// LeaseState enumerates the values for lease state. -type LeaseState string - -const ( - // LeaseStateAvailable ... - LeaseStateAvailable LeaseState = "Available" - // LeaseStateBreaking ... - LeaseStateBreaking LeaseState = "Breaking" - // LeaseStateBroken ... - LeaseStateBroken LeaseState = "Broken" - // LeaseStateExpired ... - LeaseStateExpired LeaseState = "Expired" - // LeaseStateLeased ... - LeaseStateLeased LeaseState = "Leased" -) - -// PossibleLeaseStateValues returns an array of possible values for the LeaseState const type. -func PossibleLeaseStateValues() []LeaseState { - return []LeaseState{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased} -} - -// LeaseStatus enumerates the values for lease status. -type LeaseStatus string - -const ( - // LeaseStatusLocked ... - LeaseStatusLocked LeaseStatus = "Locked" - // LeaseStatusUnlocked ... - LeaseStatusUnlocked LeaseStatus = "Unlocked" -) - -// PossibleLeaseStatusValues returns an array of possible values for the LeaseStatus const type. 
-func PossibleLeaseStatusValues() []LeaseStatus { - return []LeaseStatus{LeaseStatusLocked, LeaseStatusUnlocked} -} - -// ListContainersInclude enumerates the values for list containers include. -type ListContainersInclude string - -const ( - // ListContainersIncludeDeleted ... - ListContainersIncludeDeleted ListContainersInclude = "deleted" -) - -// PossibleListContainersIncludeValues returns an array of possible values for the ListContainersInclude const type. -func PossibleListContainersIncludeValues() []ListContainersInclude { - return []ListContainersInclude{ListContainersIncludeDeleted} -} - -// ListKeyExpand enumerates the values for list key expand. -type ListKeyExpand string - -const ( - // ListKeyExpandKerb ... - ListKeyExpandKerb ListKeyExpand = "kerb" -) - -// PossibleListKeyExpandValues returns an array of possible values for the ListKeyExpand const type. -func PossibleListKeyExpandValues() []ListKeyExpand { - return []ListKeyExpand{ListKeyExpandKerb} -} - -// ListSharesExpand enumerates the values for list shares expand. -type ListSharesExpand string - -const ( - // ListSharesExpandDeleted ... - ListSharesExpandDeleted ListSharesExpand = "deleted" - // ListSharesExpandSnapshots ... - ListSharesExpandSnapshots ListSharesExpand = "snapshots" -) - -// PossibleListSharesExpandValues returns an array of possible values for the ListSharesExpand const type. -func PossibleListSharesExpandValues() []ListSharesExpand { - return []ListSharesExpand{ListSharesExpandDeleted, ListSharesExpandSnapshots} -} - -// MinimumTLSVersion enumerates the values for minimum tls version. -type MinimumTLSVersion string - -const ( - // MinimumTLSVersionTLS10 ... - MinimumTLSVersionTLS10 MinimumTLSVersion = "TLS1_0" - // MinimumTLSVersionTLS11 ... - MinimumTLSVersionTLS11 MinimumTLSVersion = "TLS1_1" - // MinimumTLSVersionTLS12 ... - MinimumTLSVersionTLS12 MinimumTLSVersion = "TLS1_2" -) - -// PossibleMinimumTLSVersionValues returns an array of possible values for the MinimumTLSVersion const type. -func PossibleMinimumTLSVersionValues() []MinimumTLSVersion { - return []MinimumTLSVersion{MinimumTLSVersionTLS10, MinimumTLSVersionTLS11, MinimumTLSVersionTLS12} -} - -// Name enumerates the values for name. -type Name string - -const ( - // NameAccessTimeTracking ... - NameAccessTimeTracking Name = "AccessTimeTracking" -) - -// PossibleNameValues returns an array of possible values for the Name const type. -func PossibleNameValues() []Name { - return []Name{NameAccessTimeTracking} -} - -// Permissions enumerates the values for permissions. -type Permissions string - -const ( - // PermissionsA ... - PermissionsA Permissions = "a" - // PermissionsC ... - PermissionsC Permissions = "c" - // PermissionsD ... - PermissionsD Permissions = "d" - // PermissionsL ... - PermissionsL Permissions = "l" - // PermissionsP ... - PermissionsP Permissions = "p" - // PermissionsR ... - PermissionsR Permissions = "r" - // PermissionsU ... - PermissionsU Permissions = "u" - // PermissionsW ... - PermissionsW Permissions = "w" -) - -// PossiblePermissionsValues returns an array of possible values for the Permissions const type. -func PossiblePermissionsValues() []Permissions { - return []Permissions{PermissionsA, PermissionsC, PermissionsD, PermissionsL, PermissionsP, PermissionsR, PermissionsU, PermissionsW} -} - -// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection -// provisioning state. 
-type PrivateEndpointConnectionProvisioningState string - -const ( - // PrivateEndpointConnectionProvisioningStateCreating ... - PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating" - // PrivateEndpointConnectionProvisioningStateDeleting ... - PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting" - // PrivateEndpointConnectionProvisioningStateFailed ... - PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed" - // PrivateEndpointConnectionProvisioningStateSucceeded ... - PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded" -) - -// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type. -func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState { - return []PrivateEndpointConnectionProvisioningState{PrivateEndpointConnectionProvisioningStateCreating, PrivateEndpointConnectionProvisioningStateDeleting, PrivateEndpointConnectionProvisioningStateFailed, PrivateEndpointConnectionProvisioningStateSucceeded} -} - -// PrivateEndpointServiceConnectionStatus enumerates the values for private endpoint service connection status. -type PrivateEndpointServiceConnectionStatus string - -const ( - // PrivateEndpointServiceConnectionStatusApproved ... - PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved" - // PrivateEndpointServiceConnectionStatusPending ... - PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending" - // PrivateEndpointServiceConnectionStatusRejected ... - PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected" -) - -// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type. -func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus { - return []PrivateEndpointServiceConnectionStatus{PrivateEndpointServiceConnectionStatusApproved, PrivateEndpointServiceConnectionStatusPending, PrivateEndpointServiceConnectionStatusRejected} -} - -// ProvisioningState enumerates the values for provisioning state. -type ProvisioningState string - -const ( - // ProvisioningStateCreating ... - ProvisioningStateCreating ProvisioningState = "Creating" - // ProvisioningStateResolvingDNS ... - ProvisioningStateResolvingDNS ProvisioningState = "ResolvingDNS" - // ProvisioningStateSucceeded ... - ProvisioningStateSucceeded ProvisioningState = "Succeeded" -) - -// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. -func PossibleProvisioningStateValues() []ProvisioningState { - return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateResolvingDNS, ProvisioningStateSucceeded} -} - -// PublicAccess enumerates the values for public access. -type PublicAccess string - -const ( - // PublicAccessBlob ... - PublicAccessBlob PublicAccess = "Blob" - // PublicAccessContainer ... - PublicAccessContainer PublicAccess = "Container" - // PublicAccessNone ... - PublicAccessNone PublicAccess = "None" -) - -// PossiblePublicAccessValues returns an array of possible values for the PublicAccess const type. 
-func PossiblePublicAccessValues() []PublicAccess { - return []PublicAccess{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} -} - -// PutSharesExpand enumerates the values for put shares expand. -type PutSharesExpand string - -const ( - // PutSharesExpandSnapshots ... - PutSharesExpandSnapshots PutSharesExpand = "snapshots" -) - -// PossiblePutSharesExpandValues returns an array of possible values for the PutSharesExpand const type. -func PossiblePutSharesExpandValues() []PutSharesExpand { - return []PutSharesExpand{PutSharesExpandSnapshots} -} - -// Reason enumerates the values for reason. -type Reason string - -const ( - // ReasonAccountNameInvalid ... - ReasonAccountNameInvalid Reason = "AccountNameInvalid" - // ReasonAlreadyExists ... - ReasonAlreadyExists Reason = "AlreadyExists" -) - -// PossibleReasonValues returns an array of possible values for the Reason const type. -func PossibleReasonValues() []Reason { - return []Reason{ReasonAccountNameInvalid, ReasonAlreadyExists} -} - -// ReasonCode enumerates the values for reason code. -type ReasonCode string - -const ( - // ReasonCodeNotAvailableForSubscription ... - ReasonCodeNotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" - // ReasonCodeQuotaID ... - ReasonCodeQuotaID ReasonCode = "QuotaId" -) - -// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type. -func PossibleReasonCodeValues() []ReasonCode { - return []ReasonCode{ReasonCodeNotAvailableForSubscription, ReasonCodeQuotaID} -} - -// RootSquashType enumerates the values for root squash type. -type RootSquashType string - -const ( - // RootSquashTypeAllSquash ... - RootSquashTypeAllSquash RootSquashType = "AllSquash" - // RootSquashTypeNoRootSquash ... - RootSquashTypeNoRootSquash RootSquashType = "NoRootSquash" - // RootSquashTypeRootSquash ... - RootSquashTypeRootSquash RootSquashType = "RootSquash" -) - -// PossibleRootSquashTypeValues returns an array of possible values for the RootSquashType const type. -func PossibleRootSquashTypeValues() []RootSquashType { - return []RootSquashType{RootSquashTypeAllSquash, RootSquashTypeNoRootSquash, RootSquashTypeRootSquash} -} - -// RoutingChoice enumerates the values for routing choice. -type RoutingChoice string - -const ( - // RoutingChoiceInternetRouting ... - RoutingChoiceInternetRouting RoutingChoice = "InternetRouting" - // RoutingChoiceMicrosoftRouting ... - RoutingChoiceMicrosoftRouting RoutingChoice = "MicrosoftRouting" -) - -// PossibleRoutingChoiceValues returns an array of possible values for the RoutingChoice const type. -func PossibleRoutingChoiceValues() []RoutingChoice { - return []RoutingChoice{RoutingChoiceInternetRouting, RoutingChoiceMicrosoftRouting} -} - -// Services enumerates the values for services. -type Services string - -const ( - // ServicesB ... - ServicesB Services = "b" - // ServicesF ... - ServicesF Services = "f" - // ServicesQ ... - ServicesQ Services = "q" - // ServicesT ... - ServicesT Services = "t" -) - -// PossibleServicesValues returns an array of possible values for the Services const type. -func PossibleServicesValues() []Services { - return []Services{ServicesB, ServicesF, ServicesQ, ServicesT} -} - -// ShareAccessTier enumerates the values for share access tier. -type ShareAccessTier string - -const ( - // ShareAccessTierCool ... - ShareAccessTierCool ShareAccessTier = "Cool" - // ShareAccessTierHot ... - ShareAccessTierHot ShareAccessTier = "Hot" - // ShareAccessTierPremium ... 
- ShareAccessTierPremium ShareAccessTier = "Premium" - // ShareAccessTierTransactionOptimized ... - ShareAccessTierTransactionOptimized ShareAccessTier = "TransactionOptimized" -) - -// PossibleShareAccessTierValues returns an array of possible values for the ShareAccessTier const type. -func PossibleShareAccessTierValues() []ShareAccessTier { - return []ShareAccessTier{ShareAccessTierCool, ShareAccessTierHot, ShareAccessTierPremium, ShareAccessTierTransactionOptimized} -} - -// SignedResource enumerates the values for signed resource. -type SignedResource string - -const ( - // SignedResourceB ... - SignedResourceB SignedResource = "b" - // SignedResourceC ... - SignedResourceC SignedResource = "c" - // SignedResourceF ... - SignedResourceF SignedResource = "f" - // SignedResourceS ... - SignedResourceS SignedResource = "s" -) - -// PossibleSignedResourceValues returns an array of possible values for the SignedResource const type. -func PossibleSignedResourceValues() []SignedResource { - return []SignedResource{SignedResourceB, SignedResourceC, SignedResourceF, SignedResourceS} -} - -// SignedResourceTypes enumerates the values for signed resource types. -type SignedResourceTypes string - -const ( - // SignedResourceTypesC ... - SignedResourceTypesC SignedResourceTypes = "c" - // SignedResourceTypesO ... - SignedResourceTypesO SignedResourceTypes = "o" - // SignedResourceTypesS ... - SignedResourceTypesS SignedResourceTypes = "s" -) - -// PossibleSignedResourceTypesValues returns an array of possible values for the SignedResourceTypes const type. -func PossibleSignedResourceTypesValues() []SignedResourceTypes { - return []SignedResourceTypes{SignedResourceTypesC, SignedResourceTypesO, SignedResourceTypesS} -} - -// SkuName enumerates the values for sku name. -type SkuName string - -const ( - // SkuNamePremiumLRS ... - SkuNamePremiumLRS SkuName = "Premium_LRS" - // SkuNamePremiumZRS ... - SkuNamePremiumZRS SkuName = "Premium_ZRS" - // SkuNameStandardGRS ... - SkuNameStandardGRS SkuName = "Standard_GRS" - // SkuNameStandardGZRS ... - SkuNameStandardGZRS SkuName = "Standard_GZRS" - // SkuNameStandardLRS ... - SkuNameStandardLRS SkuName = "Standard_LRS" - // SkuNameStandardRAGRS ... - SkuNameStandardRAGRS SkuName = "Standard_RAGRS" - // SkuNameStandardRAGZRS ... - SkuNameStandardRAGZRS SkuName = "Standard_RAGZRS" - // SkuNameStandardZRS ... - SkuNameStandardZRS SkuName = "Standard_ZRS" -) - -// PossibleSkuNameValues returns an array of possible values for the SkuName const type. -func PossibleSkuNameValues() []SkuName { - return []SkuName{SkuNamePremiumLRS, SkuNamePremiumZRS, SkuNameStandardGRS, SkuNameStandardGZRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardRAGZRS, SkuNameStandardZRS} -} - -// SkuTier enumerates the values for sku tier. -type SkuTier string - -const ( - // SkuTierPremium ... - SkuTierPremium SkuTier = "Premium" - // SkuTierStandard ... - SkuTierStandard SkuTier = "Standard" -) - -// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. -func PossibleSkuTierValues() []SkuTier { - return []SkuTier{SkuTierPremium, SkuTierStandard} -} - -// State enumerates the values for state. -type State string - -const ( - // StateDeprovisioning ... - StateDeprovisioning State = "deprovisioning" - // StateFailed ... - StateFailed State = "failed" - // StateNetworkSourceDeleted ... - StateNetworkSourceDeleted State = "networkSourceDeleted" - // StateProvisioning ... - StateProvisioning State = "provisioning" - // StateSucceeded ... 
- StateSucceeded State = "succeeded" -) - -// PossibleStateValues returns an array of possible values for the State const type. -func PossibleStateValues() []State { - return []State{StateDeprovisioning, StateFailed, StateNetworkSourceDeleted, StateProvisioning, StateSucceeded} -} - -// UsageUnit enumerates the values for usage unit. -type UsageUnit string - -const ( - // UsageUnitBytes ... - UsageUnitBytes UsageUnit = "Bytes" - // UsageUnitBytesPerSecond ... - UsageUnitBytesPerSecond UsageUnit = "BytesPerSecond" - // UsageUnitCount ... - UsageUnitCount UsageUnit = "Count" - // UsageUnitCountsPerSecond ... - UsageUnitCountsPerSecond UsageUnit = "CountsPerSecond" - // UsageUnitPercent ... - UsageUnitPercent UsageUnit = "Percent" - // UsageUnitSeconds ... - UsageUnitSeconds UsageUnit = "Seconds" -) - -// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type. -func PossibleUsageUnitValues() []UsageUnit { - return []UsageUnit{UsageUnitBytes, UsageUnitBytesPerSecond, UsageUnitCount, UsageUnitCountsPerSecond, UsageUnitPercent, UsageUnitSeconds} -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/fileservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/fileservices.go deleted file mode 100644 index 3c6351539a2c..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/fileservices.go +++ /dev/null @@ -1,323 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// FileServicesClient is the the Azure Storage Management API. -type FileServicesClient struct { - BaseClient -} - -// NewFileServicesClient creates an instance of the FileServicesClient client. -func NewFileServicesClient(subscriptionID string) FileServicesClient { - return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient { - return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource -// Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error()) - } - - req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.GetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// GetServicePropertiesPreparer prepares the GetServiceProperties request. -func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "FileServicesName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the -// http.Response Body if it receives an error. 
-func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always -// closes the http.Response Body. -func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list all file services in storage accounts -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileServicesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource -// Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource -// Sharing) rules. 
-func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMaximum, Rule: int64(365), Chain: nil}, - {Target: "parameters.FileServicePropertiesProperties.ShareDeleteRetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}, - }}}}}); err != nil { - return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error()) - } - - req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.SetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.SetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// SetServicePropertiesPreparer prepares the SetServiceProperties request. 
-func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "FileServicesName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - parameters.Sku = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the -// http.Response Body if it receives an error. -func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always -// closes the http.Response Body. -func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/fileshares.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/fileshares.go deleted file mode 100644 index 6ccc4bfb14f1..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/fileshares.go +++ /dev/null @@ -1,703 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// FileSharesClient is the the Azure Storage Management API. -type FileSharesClient struct { - BaseClient -} - -// NewFileSharesClient creates an instance of the FileSharesClient client. -func NewFileSharesClient(subscriptionID string) FileSharesClient { - return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client using a custom endpoint. 
Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient { - return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Create creates a new share under the specified account as described by request body. The share resource includes -// metadata and properties for that share. It does not include a list of the files contained by the share. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// shareName - the name of the file share within the specified storage account. File share names must be -// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) -// character must be immediately preceded and followed by a letter or number. -// fileShare - properties of the file share to create. -// expand - optional, used to create a snapshot. -func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare, expand PutSharesExpand) (result FileShare, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: shareName, - Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: fileShare, - Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(102400), Chain: nil}, - {Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileSharesClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare, expand) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", 
nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare, expand PutSharesExpand) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "shareName": autorest.Encode("path", shareName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(string(expand)) > 0 { - queryParameters["$expand"] = autorest.Encode("query", expand) - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), - autorest.WithJSON(fileShare), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes specified share under its account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// shareName - the name of the file share within the specified storage account. File share names must be -// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) -// character must be immediately preceded and followed by a letter or number. -// xMsSnapshot - optional, used to delete a snapshot. 
-func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string, xMsSnapshot string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: shareName, - Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName, xMsSnapshot) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, xMsSnapshot string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "shareName": autorest.Encode("path", shareName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if len(xMsSnapshot) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets properties of a specified share. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// shareName - the name of the file share within the specified storage account. File share names must be -// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) -// character must be immediately preceded and followed by a letter or number. -// expand - optional, used to expand the properties within share's properties. -// xMsSnapshot - optional, used to retrieve properties of a snapshot. -func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string, expand GetShareExpand, xMsSnapshot string) (result FileShare, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: shareName, - Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileSharesClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName, expand, xMsSnapshot) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, 
"Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, expand GetShareExpand, xMsSnapshot string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "shareName": autorest.Encode("path", shareName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(string(expand)) > 0 { - queryParameters["$expand"] = autorest.Encode("query", expand) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if len(xMsSnapshot) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("x-ms-snapshot", autorest.String(xMsSnapshot))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists all shares. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// maxpagesize - optional. Specified maximum number of shares that can be included in the list. -// filter - optional. When specified, only share names starting with the filter will be listed. -// expand - optional, used to expand the properties within share's properties. 
-func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand ListSharesExpand) (result FileShareItemsPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List") - defer func() { - sc := -1 - if result.fsi.Response.Response != nil { - sc = result.fsi.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileSharesClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter, expand) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.fsi.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request") - return - } - - result.fsi, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request") - return - } - if result.fsi.hasNextLink() && result.fsi.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand ListSharesExpand) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(maxpagesize) > 0 { - queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if len(string(expand)) > 0 { - queryParameters["$expand"] = autorest.Encode("query", expand) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. 
The method will close the -// http.Response Body if it receives an error. -func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) { - req, err := lastResults.fileShareItemsPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string, expand ListSharesExpand) (result FileShareItemsIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter, expand) - return -} - -// Restore restore a file share within a valid retention days if share soft delete is enabled -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// shareName - the name of the file share within the specified storage account. File share names must be -// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) -// character must be immediately preceded and followed by a letter or number. 
-func (client FileSharesClient) Restore(ctx context.Context, resourceGroupName string, accountName string, shareName string, deletedShare DeletedShare) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Restore") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: shareName, - Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: deletedShare, - Constraints: []validation.Constraint{{Target: "deletedShare.DeletedShareName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "deletedShare.DeletedShareVersion", Name: validation.Null, Rule: true, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileSharesClient", "Restore", err.Error()) - } - - req, err := client.RestorePreparer(ctx, resourceGroupName, accountName, shareName, deletedShare) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", resp, "Failure sending request") - return - } - - result, err = client.RestoreResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Restore", resp, "Failure responding to request") - return - } - - return -} - -// RestorePreparer prepares the Restore request. 
-func (client FileSharesClient) RestorePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, deletedShare DeletedShare) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "shareName": autorest.Encode("path", shareName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore", pathParameters), - autorest.WithJSON(deletedShare), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreSender sends the Restore request. The method will close the -// http.Response Body if it receives an error. -func (client FileSharesClient) RestoreSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// RestoreResponder handles the response to the Restore request. The method always -// closes the http.Response Body. -func (client FileSharesClient) RestoreResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Update updates share properties as specified in request body. Properties not mentioned in the request will not be -// changed. Update fails if the specified share does not already exist. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// shareName - the name of the file share within the specified storage account. File share names must be -// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) -// character must be immediately preceded and followed by a letter or number. -// fileShare - properties to update for the file share. 
-func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: shareName, - Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.FileSharesClient", "Update", err.Error()) - } - - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "shareName": autorest.Encode("path", shareName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters), - autorest.WithJSON(fileShare), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. 
-func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/managementpolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/managementpolicies.go deleted file mode 100644 index 77e126a8a379..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/managementpolicies.go +++ /dev/null @@ -1,316 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ManagementPoliciesClient is the the Azure Storage Management API. -type ManagementPoliciesClient struct { - BaseClient -} - -// NewManagementPoliciesClient creates an instance of the ManagementPoliciesClient client. -func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient { - return NewManagementPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewManagementPoliciesClientWithBaseURI creates an instance of the ManagementPoliciesClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient { - return ManagementPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sets the managementpolicy to the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// properties - the ManagementPolicy set to a storage account. 
-func (client ManagementPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (result ManagementPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: properties, - Constraints: []validation.Constraint{{Target: "properties.ManagementPolicyProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "properties.ManagementPolicyProperties.Policy.Rules", Name: validation.Null, Rule: true, Chain: nil}}}, - }}}}}); err != nil { - return result, validation.NewError("storage.ManagementPoliciesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ManagementPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPolicy) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "managementPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), - autorest.WithJSON(properties), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ManagementPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ManagementPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the managementpolicy associated with the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client ManagementPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.ManagementPoliciesClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client ManagementPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "managementPolicyName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client ManagementPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the managementpolicy associated with the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client ManagementPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result ManagementPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagementPoliciesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.ManagementPoliciesClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ManagementPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
-    pathParameters := map[string]interface{}{
-        "accountName":          autorest.Encode("path", accountName),
-        "managementPolicyName": autorest.Encode("path", "default"),
-        "resourceGroupName":    autorest.Encode("path", resourceGroupName),
-        "subscriptionId":       autorest.Encode("path", client.SubscriptionID),
-    }
-
-    const APIVersion = "2021-02-01"
-    queryParameters := map[string]interface{}{
-        "api-version": APIVersion,
-    }
-
-    preparer := autorest.CreatePreparer(
-        autorest.AsGet(),
-        autorest.WithBaseURL(client.BaseURI),
-        autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters),
-        autorest.WithQueryParameters(queryParameters))
-    return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client ManagementPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
-    return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client ManagementPoliciesClient) GetResponder(resp *http.Response) (result ManagementPolicy, err error) {
-    err = autorest.Respond(
-        resp,
-        azure.WithErrorUnlessStatusCode(http.StatusOK),
-        autorest.ByUnmarshallingJSON(&result),
-        autorest.ByClosing())
-    result.Response = autorest.Response{Response: resp}
-    return
-}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/models.go
deleted file mode 100644
index 1a7663011e13..000000000000
--- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/models.go
+++ /dev/null
@@ -1,5140 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
-    "context"
-    "encoding/json"
-    "github.com/Azure/go-autorest/autorest"
-    "github.com/Azure/go-autorest/autorest/azure"
-    "github.com/Azure/go-autorest/autorest/date"
-    "github.com/Azure/go-autorest/autorest/to"
-    "github.com/Azure/go-autorest/tracing"
-    "net/http"
-)
-
-// The package's fully qualified name.
-const fqdn = "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage"
-
-// Account the storage account.
-type Account struct {
-    autorest.Response `json:"-"`
-    // Sku - READ-ONLY; Gets the SKU.
-    Sku *Sku `json:"sku,omitempty"`
-    // Kind - READ-ONLY; Gets the Kind. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage'
-    Kind Kind `json:"kind,omitempty"`
-    // Identity - The identity of the resource.
-    Identity *Identity `json:"identity,omitempty"`
-    // ExtendedLocation - The extendedLocation of the resource.
- ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` - // AccountProperties - Properties of the storage account. - *AccountProperties `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Account. -func (a Account) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if a.Identity != nil { - objectMap["identity"] = a.Identity - } - if a.ExtendedLocation != nil { - objectMap["extendedLocation"] = a.ExtendedLocation - } - if a.AccountProperties != nil { - objectMap["properties"] = a.AccountProperties - } - if a.Tags != nil { - objectMap["tags"] = a.Tags - } - if a.Location != nil { - objectMap["location"] = a.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Account struct. -func (a *Account) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - a.Sku = &sku - } - case "kind": - if v != nil { - var kind Kind - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - a.Kind = kind - } - case "identity": - if v != nil { - var identity Identity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - a.Identity = &identity - } - case "extendedLocation": - if v != nil { - var extendedLocation ExtendedLocation - err = json.Unmarshal(*v, &extendedLocation) - if err != nil { - return err - } - a.ExtendedLocation = &extendedLocation - } - case "properties": - if v != nil { - var accountProperties AccountProperties - err = json.Unmarshal(*v, &accountProperties) - if err != nil { - return err - } - a.AccountProperties = &accountProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - a.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - a.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - a.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - a.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - a.Type = &typeVar - } - } - } - - return nil -} - -// AccountCheckNameAvailabilityParameters the parameters used to check the availability of the storage -// account name. -type AccountCheckNameAvailabilityParameters struct { - // Name - The storage account name. 
- Name *string `json:"name,omitempty"` - // Type - The type of resource, Microsoft.Storage/storageAccounts - Type *string `json:"type,omitempty"` -} - -// AccountCreateParameters the parameters used when creating a storage account. -type AccountCreateParameters struct { - // Sku - Required. Gets or sets the SKU name. - Sku *Sku `json:"sku,omitempty"` - // Kind - Required. Indicates the type of storage account. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' - Kind Kind `json:"kind,omitempty"` - // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed. - Location *string `json:"location,omitempty"` - // ExtendedLocation - Optional. Set the extended location of the resource. If not set, the storage account will be created in Azure main region. Otherwise it will be created in the specified extended location - ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` - // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters. - Tags map[string]*string `json:"tags"` - // Identity - The identity of the resource. - Identity *Identity `json:"identity,omitempty"` - // AccountPropertiesCreateParameters - The parameters used to create the storage account. - *AccountPropertiesCreateParameters `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountCreateParameters. -func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if acp.Sku != nil { - objectMap["sku"] = acp.Sku - } - if acp.Kind != "" { - objectMap["kind"] = acp.Kind - } - if acp.Location != nil { - objectMap["location"] = acp.Location - } - if acp.ExtendedLocation != nil { - objectMap["extendedLocation"] = acp.ExtendedLocation - } - if acp.Tags != nil { - objectMap["tags"] = acp.Tags - } - if acp.Identity != nil { - objectMap["identity"] = acp.Identity - } - if acp.AccountPropertiesCreateParameters != nil { - objectMap["properties"] = acp.AccountPropertiesCreateParameters - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct. 
-func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - acp.Sku = &sku - } - case "kind": - if v != nil { - var kind Kind - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - acp.Kind = kind - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - acp.Location = &location - } - case "extendedLocation": - if v != nil { - var extendedLocation ExtendedLocation - err = json.Unmarshal(*v, &extendedLocation) - if err != nil { - return err - } - acp.ExtendedLocation = &extendedLocation - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - acp.Tags = tags - } - case "identity": - if v != nil { - var identity Identity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - acp.Identity = &identity - } - case "properties": - if v != nil { - var accountPropertiesCreateParameters AccountPropertiesCreateParameters - err = json.Unmarshal(*v, &accountPropertiesCreateParameters) - if err != nil { - return err - } - acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters - } - } - } - - return nil -} - -// AccountInternetEndpoints the URIs that are used to perform a retrieval of a public blob, file, web or -// dfs object via a internet routing endpoint. -type AccountInternetEndpoints struct { - // Blob - READ-ONLY; Gets the blob endpoint. - Blob *string `json:"blob,omitempty"` - // File - READ-ONLY; Gets the file endpoint. - File *string `json:"file,omitempty"` - // Web - READ-ONLY; Gets the web endpoint. - Web *string `json:"web,omitempty"` - // Dfs - READ-ONLY; Gets the dfs endpoint. - Dfs *string `json:"dfs,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountInternetEndpoints. -func (aie AccountInternetEndpoints) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AccountKey an access key for the storage account. -type AccountKey struct { - // KeyName - READ-ONLY; Name of the key. - KeyName *string `json:"keyName,omitempty"` - // Value - READ-ONLY; Base 64-encoded value of the key. - Value *string `json:"value,omitempty"` - // Permissions - READ-ONLY; Permissions for the key -- read-only or full permissions. Possible values include: 'KeyPermissionRead', 'KeyPermissionFull' - Permissions KeyPermission `json:"permissions,omitempty"` - // CreationTime - READ-ONLY; Creation time of the key, in round trip date format. - CreationTime *date.Time `json:"creationTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountKey. -func (ak AccountKey) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AccountListKeysResult the response from the ListKeys operation. -type AccountListKeysResult struct { - autorest.Response `json:"-"` - // Keys - READ-ONLY; Gets the list of storage account keys and their properties for the specified storage account. - Keys *[]AccountKey `json:"keys,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountListKeysResult. 
-func (alkr AccountListKeysResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AccountListResult the response from the List Storage Accounts operation. -type AccountListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; Gets the list of storage accounts and their properties. - Value *[]Account `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to query next page of storage accounts. Returned when total number of requested storage accounts exceed maximum page size. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountListResult. -func (alr AccountListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AccountListResultIterator provides access to a complete listing of Account values. -type AccountListResultIterator struct { - i int - page AccountListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *AccountListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter AccountListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter AccountListResultIterator) Response() AccountListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter AccountListResultIterator) Value() Account { - if !iter.page.NotDone() { - return Account{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the AccountListResultIterator type. -func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator { - return AccountListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (alr AccountListResult) IsEmpty() bool { - return alr.Value == nil || len(*alr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (alr AccountListResult) hasNextLink() bool { - return alr.NextLink != nil && len(*alr.NextLink) != 0 -} - -// accountListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) { - if !alr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(alr.NextLink))) -} - -// AccountListResultPage contains a page of Account values. -type AccountListResultPage struct { - fn func(context.Context, AccountListResult) (AccountListResult, error) - alr AccountListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.alr) - if err != nil { - return err - } - page.alr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *AccountListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page AccountListResultPage) NotDone() bool { - return !page.alr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page AccountListResultPage) Response() AccountListResult { - return page.alr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page AccountListResultPage) Values() []Account { - if page.alr.IsEmpty() { - return nil - } - return *page.alr.Value -} - -// Creates a new instance of the AccountListResultPage type. -func NewAccountListResultPage(cur AccountListResult, getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage { - return AccountListResultPage{ - fn: getNextPage, - alr: cur, - } -} - -// AccountMicrosoftEndpoints the URIs that are used to perform a retrieval of a public blob, queue, table, -// web or dfs object via a microsoft routing endpoint. -type AccountMicrosoftEndpoints struct { - // Blob - READ-ONLY; Gets the blob endpoint. - Blob *string `json:"blob,omitempty"` - // Queue - READ-ONLY; Gets the queue endpoint. - Queue *string `json:"queue,omitempty"` - // Table - READ-ONLY; Gets the table endpoint. - Table *string `json:"table,omitempty"` - // File - READ-ONLY; Gets the file endpoint. - File *string `json:"file,omitempty"` - // Web - READ-ONLY; Gets the web endpoint. - Web *string `json:"web,omitempty"` - // Dfs - READ-ONLY; Gets the dfs endpoint. - Dfs *string `json:"dfs,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountMicrosoftEndpoints. -func (ame AccountMicrosoftEndpoints) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AccountProperties properties of the storage account. -type AccountProperties struct { - // ProvisioningState - READ-ONLY; Gets the status of the storage account at the time the operation was called. 
Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateResolvingDNS', 'ProvisioningStateSucceeded' - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` - // PrimaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint. - PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` - // PrimaryLocation - READ-ONLY; Gets the location of the primary data center for the storage account. - PrimaryLocation *string `json:"primaryLocation,omitempty"` - // StatusOfPrimary - READ-ONLY; Gets the status indicating whether the primary location of the storage account is available or unavailable. Possible values include: 'AccountStatusAvailable', 'AccountStatusUnavailable' - StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` - // LastGeoFailoverTime - READ-ONLY; Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS. - LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` - // SecondaryLocation - READ-ONLY; Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS. - SecondaryLocation *string `json:"secondaryLocation,omitempty"` - // StatusOfSecondary - READ-ONLY; Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'AccountStatusAvailable', 'AccountStatusUnavailable' - StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` - // CreationTime - READ-ONLY; Gets the creation date and time of the storage account in UTC. - CreationTime *date.Time `json:"creationTime,omitempty"` - // CustomDomain - READ-ONLY; Gets the custom domain the user assigned to this storage account. - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // SasPolicy - READ-ONLY; SasPolicy assigned to the storage account. - SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` - // KeyPolicy - READ-ONLY; KeyPolicy assigned to the storage account. - KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"` - // KeyCreationTime - READ-ONLY; Storage account keys creation time. - KeyCreationTime *KeyCreationTime `json:"keyCreationTime,omitempty"` - // SecondaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS. - SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` - // Encryption - READ-ONLY; Gets the encryption settings on the account. If unspecified, the account is unencrypted. - Encryption *Encryption `json:"encryption,omitempty"` - // AccessTier - READ-ONLY; Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'AccessTierHot', 'AccessTierCool' - AccessTier AccessTier `json:"accessTier,omitempty"` - // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files. 
- AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` - // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - // NetworkRuleSet - READ-ONLY; Network rule set - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` - // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true. - IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"` - // GeoReplicationStats - READ-ONLY; Geo Replication Stats - GeoReplicationStats *GeoReplicationStats `json:"geoReplicationStats,omitempty"` - // FailoverInProgress - READ-ONLY; If the failover is in progress, the value will be true, otherwise, it will be null. - FailoverInProgress *bool `json:"failoverInProgress,omitempty"` - // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled' - LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` - // PrivateEndpointConnections - READ-ONLY; List of private endpoint connection associated with the specified storage account - PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"` - // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer - RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"` - // BlobRestoreStatus - READ-ONLY; Blob restore status - BlobRestoreStatus *BlobRestoreStatus `json:"blobRestoreStatus,omitempty"` - // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property. - AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"` - // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12' - MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` - // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true. - AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"` - // EnableNfsV3 - NFS 3.0 protocol support enabled if set to true. - EnableNfsV3 *bool `json:"isNfsV3Enabled,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountProperties. 
-func (ap AccountProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ap.AzureFilesIdentityBasedAuthentication != nil { - objectMap["azureFilesIdentityBasedAuthentication"] = ap.AzureFilesIdentityBasedAuthentication - } - if ap.EnableHTTPSTrafficOnly != nil { - objectMap["supportsHttpsTrafficOnly"] = ap.EnableHTTPSTrafficOnly - } - if ap.IsHnsEnabled != nil { - objectMap["isHnsEnabled"] = ap.IsHnsEnabled - } - if ap.LargeFileSharesState != "" { - objectMap["largeFileSharesState"] = ap.LargeFileSharesState - } - if ap.RoutingPreference != nil { - objectMap["routingPreference"] = ap.RoutingPreference - } - if ap.AllowBlobPublicAccess != nil { - objectMap["allowBlobPublicAccess"] = ap.AllowBlobPublicAccess - } - if ap.MinimumTLSVersion != "" { - objectMap["minimumTlsVersion"] = ap.MinimumTLSVersion - } - if ap.AllowSharedKeyAccess != nil { - objectMap["allowSharedKeyAccess"] = ap.AllowSharedKeyAccess - } - if ap.EnableNfsV3 != nil { - objectMap["isNfsV3Enabled"] = ap.EnableNfsV3 - } - return json.Marshal(objectMap) -} - -// AccountPropertiesCreateParameters the parameters used to create the storage account. -type AccountPropertiesCreateParameters struct { - // SasPolicy - SasPolicy assigned to the storage account. - SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` - // KeyPolicy - KeyPolicy assigned to the storage account. - KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"` - // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // Encryption - Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled. - Encryption *Encryption `json:"encryption,omitempty"` - // NetworkRuleSet - Network rule set - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` - // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'AccessTierHot', 'AccessTierCool' - AccessTier AccessTier `json:"accessTier,omitempty"` - // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files. - AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` - // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. The default value is true since API version 2019-04-01. - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - // IsHnsEnabled - Account HierarchicalNamespace enabled if sets to true. - IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"` - // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled' - LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` - // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer - RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"` - // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property. 
- AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"` - // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12' - MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` - // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true. - AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"` - // EnableNfsV3 - NFS 3.0 protocol support enabled if set to true. - EnableNfsV3 *bool `json:"isNfsV3Enabled,omitempty"` -} - -// AccountPropertiesUpdateParameters the parameters used when updating a storage account. -type AccountPropertiesUpdateParameters struct { - // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // Encryption - Provides the encryption settings on the account. The default setting is unencrypted. - Encryption *Encryption `json:"encryption,omitempty"` - // SasPolicy - SasPolicy assigned to the storage account. - SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` - // KeyPolicy - KeyPolicy assigned to the storage account. - KeyPolicy *KeyPolicy `json:"keyPolicy,omitempty"` - // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'AccessTierHot', 'AccessTierCool' - AccessTier AccessTier `json:"accessTier,omitempty"` - // AzureFilesIdentityBasedAuthentication - Provides the identity based authentication settings for Azure Files. - AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` - // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - // NetworkRuleSet - Network rule set - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` - // LargeFileSharesState - Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. Possible values include: 'LargeFileSharesStateDisabled', 'LargeFileSharesStateEnabled' - LargeFileSharesState LargeFileSharesState `json:"largeFileSharesState,omitempty"` - // RoutingPreference - Maintains information about the network routing choice opted by the user for data transfer - RoutingPreference *RoutingPreference `json:"routingPreference,omitempty"` - // AllowBlobPublicAccess - Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property. - AllowBlobPublicAccess *bool `json:"allowBlobPublicAccess,omitempty"` - // MinimumTLSVersion - Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. 
Possible values include: 'MinimumTLSVersionTLS10', 'MinimumTLSVersionTLS11', 'MinimumTLSVersionTLS12' - MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` - // AllowSharedKeyAccess - Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true. - AllowSharedKeyAccess *bool `json:"allowSharedKeyAccess,omitempty"` -} - -// AccountRegenerateKeyParameters the parameters used to regenerate the storage account key. -type AccountRegenerateKeyParameters struct { - // KeyName - The name of storage keys that want to be regenerated, possible values are key1, key2, kerb1, kerb2. - KeyName *string `json:"keyName,omitempty"` -} - -// AccountSasParameters the parameters to list SAS credentials of a storage account. -type AccountSasParameters struct { - // Services - The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). Possible values include: 'ServicesB', 'ServicesQ', 'ServicesT', 'ServicesF' - Services Services `json:"signedServices,omitempty"` - // ResourceTypes - The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. Possible values include: 'SignedResourceTypesS', 'SignedResourceTypesC', 'SignedResourceTypesO' - ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"` - // Permissions - The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'PermissionsR', 'PermissionsD', 'PermissionsW', 'PermissionsL', 'PermissionsA', 'PermissionsC', 'PermissionsU', 'PermissionsP' - Permissions Permissions `json:"signedPermission,omitempty"` - // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. - IPAddressOrRange *string `json:"signedIp,omitempty"` - // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'HTTPProtocolHttpshttp', 'HTTPProtocolHTTPS' - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - // SharedAccessStartTime - The time at which the SAS becomes valid. - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - // KeyToSign - The key to sign the account SAS token with. - KeyToSign *string `json:"keyToSign,omitempty"` -} - -// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsCreateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (Account, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. 
-func (future *AccountsCreateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsCreateFuture.Result. -func (future *AccountsCreateFuture) result(client AccountsClient) (a Account, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - a.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent { - a, err = client.CreateResponder(a.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request") - } - } - return -} - -// AccountsFailoverFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AccountsFailoverFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsFailoverFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsFailoverFuture.Result. -func (future *AccountsFailoverFuture) result(client AccountsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsFailoverFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("storage.AccountsFailoverFuture") - return - } - ar.Response = future.Response() - return -} - -// AccountsRestoreBlobRangesFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type AccountsRestoreBlobRangesFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AccountsClient) (BlobRestoreStatus, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AccountsRestoreBlobRangesFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AccountsRestoreBlobRangesFuture.Result. 
-func (future *AccountsRestoreBlobRangesFuture) result(client AccountsClient) (brs BlobRestoreStatus, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsRestoreBlobRangesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - brs.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("storage.AccountsRestoreBlobRangesFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if brs.Response.Response, err = future.GetResult(sender); err == nil && brs.Response.Response.StatusCode != http.StatusNoContent { - brs, err = client.RestoreBlobRangesResponder(brs.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsRestoreBlobRangesFuture", "Result", brs.Response.Response, "Failure responding to request") - } - } - return -} - -// AccountUpdateParameters the parameters that can be provided when updating the storage account -// properties. -type AccountUpdateParameters struct { - // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, nor can accounts of those SKU names be updated to any other value. - Sku *Sku `json:"sku,omitempty"` - // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters. - Tags map[string]*string `json:"tags"` - // Identity - The identity of the resource. - Identity *Identity `json:"identity,omitempty"` - // AccountPropertiesUpdateParameters - The parameters used when updating a storage account. - *AccountPropertiesUpdateParameters `json:"properties,omitempty"` - // Kind - Optional. Indicates the type of storage account. Currently only StorageV2 value supported by server. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' - Kind Kind `json:"kind,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountUpdateParameters. -func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if aup.Sku != nil { - objectMap["sku"] = aup.Sku - } - if aup.Tags != nil { - objectMap["tags"] = aup.Tags - } - if aup.Identity != nil { - objectMap["identity"] = aup.Identity - } - if aup.AccountPropertiesUpdateParameters != nil { - objectMap["properties"] = aup.AccountPropertiesUpdateParameters - } - if aup.Kind != "" { - objectMap["kind"] = aup.Kind - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct. 
-func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - aup.Sku = &sku - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - aup.Tags = tags - } - case "identity": - if v != nil { - var identity Identity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - aup.Identity = &identity - } - case "properties": - if v != nil { - var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters - err = json.Unmarshal(*v, &accountPropertiesUpdateParameters) - if err != nil { - return err - } - aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters - } - case "kind": - if v != nil { - var kind Kind - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - aup.Kind = kind - } - } - } - - return nil -} - -// ActiveDirectoryProperties settings properties for Active Directory (AD). -type ActiveDirectoryProperties struct { - // DomainName - Specifies the primary domain that the AD DNS server is authoritative for. - DomainName *string `json:"domainName,omitempty"` - // NetBiosDomainName - Specifies the NetBIOS domain name. - NetBiosDomainName *string `json:"netBiosDomainName,omitempty"` - // ForestName - Specifies the Active Directory forest to get. - ForestName *string `json:"forestName,omitempty"` - // DomainGUID - Specifies the domain GUID. - DomainGUID *string `json:"domainGuid,omitempty"` - // DomainSid - Specifies the security identifier (SID). - DomainSid *string `json:"domainSid,omitempty"` - // AzureStorageSid - Specifies the security identifier (SID) for Azure Storage. - AzureStorageSid *string `json:"azureStorageSid,omitempty"` -} - -// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag. -type AzureEntityResource struct { - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for AzureEntityResource. -func (aer AzureEntityResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AzureFilesIdentityBasedAuthentication settings for Azure Files identity based authentication. -type AzureFilesIdentityBasedAuthentication struct { - // DirectoryServiceOptions - Indicates the directory service used. Possible values include: 'DirectoryServiceOptionsNone', 'DirectoryServiceOptionsAADDS', 'DirectoryServiceOptionsAD' - DirectoryServiceOptions DirectoryServiceOptions `json:"directoryServiceOptions,omitempty"` - // ActiveDirectoryProperties - Required if choose AD. 
- ActiveDirectoryProperties *ActiveDirectoryProperties `json:"activeDirectoryProperties,omitempty"` -} - -// BlobContainer properties of the blob container, including Id, resource name, resource type, Etag. -type BlobContainer struct { - autorest.Response `json:"-"` - // ContainerProperties - Properties of the blob container. - *ContainerProperties `json:"properties,omitempty"` - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for BlobContainer. -func (bc BlobContainer) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if bc.ContainerProperties != nil { - objectMap["properties"] = bc.ContainerProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for BlobContainer struct. -func (bc *BlobContainer) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var containerProperties ContainerProperties - err = json.Unmarshal(*v, &containerProperties) - if err != nil { - return err - } - bc.ContainerProperties = &containerProperties - } - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - bc.Etag = &etag - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - bc.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - bc.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - bc.Type = &typeVar - } - } - } - - return nil -} - -// BlobInventoryPolicy the storage account blob inventory policy. -type BlobInventoryPolicy struct { - autorest.Response `json:"-"` - // BlobInventoryPolicyProperties - Returns the storage account blob inventory policy rules. - *BlobInventoryPolicyProperties `json:"properties,omitempty"` - SystemData *SystemData `json:"systemData,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for BlobInventoryPolicy. 
-func (bip BlobInventoryPolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if bip.BlobInventoryPolicyProperties != nil { - objectMap["properties"] = bip.BlobInventoryPolicyProperties - } - if bip.SystemData != nil { - objectMap["systemData"] = bip.SystemData - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for BlobInventoryPolicy struct. -func (bip *BlobInventoryPolicy) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var blobInventoryPolicyProperties BlobInventoryPolicyProperties - err = json.Unmarshal(*v, &blobInventoryPolicyProperties) - if err != nil { - return err - } - bip.BlobInventoryPolicyProperties = &blobInventoryPolicyProperties - } - case "systemData": - if v != nil { - var systemData SystemData - err = json.Unmarshal(*v, &systemData) - if err != nil { - return err - } - bip.SystemData = &systemData - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - bip.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - bip.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - bip.Type = &typeVar - } - } - } - - return nil -} - -// BlobInventoryPolicyDefinition an object that defines the blob inventory rule. Each definition consists -// of a set of filters. -type BlobInventoryPolicyDefinition struct { - // Filters - An object that defines the filter set. - Filters *BlobInventoryPolicyFilter `json:"filters,omitempty"` -} - -// BlobInventoryPolicyFilter an object that defines the blob inventory rule filter conditions. -type BlobInventoryPolicyFilter struct { - // PrefixMatch - An array of strings for blob prefixes to be matched. - PrefixMatch *[]string `json:"prefixMatch,omitempty"` - // BlobTypes - An array of predefined enum values. Valid values include blockBlob, appendBlob, pageBlob. Hns accounts does not support pageBlobs. - BlobTypes *[]string `json:"blobTypes,omitempty"` - // IncludeBlobVersions - Includes blob versions in blob inventory when value set to true. - IncludeBlobVersions *bool `json:"includeBlobVersions,omitempty"` - // IncludeSnapshots - Includes blob snapshots in blob inventory when value set to true. - IncludeSnapshots *bool `json:"includeSnapshots,omitempty"` -} - -// BlobInventoryPolicyProperties the storage account blob inventory policy properties. -type BlobInventoryPolicyProperties struct { - // LastModifiedTime - READ-ONLY; Returns the last modified date and time of the blob inventory policy. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Policy - The storage account blob inventory policy object. It is composed of policy rules. - Policy *BlobInventoryPolicySchema `json:"policy,omitempty"` -} - -// MarshalJSON is the custom marshaler for BlobInventoryPolicyProperties. -func (bipp BlobInventoryPolicyProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if bipp.Policy != nil { - objectMap["policy"] = bipp.Policy - } - return json.Marshal(objectMap) -} - -// BlobInventoryPolicyRule an object that wraps the blob inventory rule. Each rule is uniquely defined by -// name. 
-type BlobInventoryPolicyRule struct { - // Enabled - Rule is enabled when set to true. - Enabled *bool `json:"enabled,omitempty"` - // Name - A rule name can contain any combination of alpha numeric characters. Rule name is case-sensitive. It must be unique within a policy. - Name *string `json:"name,omitempty"` - // Definition - An object that defines the blob inventory policy rule. - Definition *BlobInventoryPolicyDefinition `json:"definition,omitempty"` -} - -// BlobInventoryPolicySchema the storage account blob inventory policy rules. -type BlobInventoryPolicySchema struct { - // Enabled - Policy is enabled if set to true. - Enabled *bool `json:"enabled,omitempty"` - // Destination - Container name where blob inventory files are stored. Must be pre-created. - Destination *string `json:"destination,omitempty"` - // Type - The valid value is Inventory - Type *string `json:"type,omitempty"` - // Rules - The storage account blob inventory policy rules. The rule is applied when it is enabled. - Rules *[]BlobInventoryPolicyRule `json:"rules,omitempty"` -} - -// BlobRestoreParameters blob restore parameters -type BlobRestoreParameters struct { - // TimeToRestore - Restore blob to the specified time. - TimeToRestore *date.Time `json:"timeToRestore,omitempty"` - // BlobRanges - Blob ranges to restore. - BlobRanges *[]BlobRestoreRange `json:"blobRanges,omitempty"` -} - -// BlobRestoreRange blob range -type BlobRestoreRange struct { - // StartRange - Blob start range. This is inclusive. Empty means account start. - StartRange *string `json:"startRange,omitempty"` - // EndRange - Blob end range. This is exclusive. Empty means account end. - EndRange *string `json:"endRange,omitempty"` -} - -// BlobRestoreStatus blob restore status. -type BlobRestoreStatus struct { - autorest.Response `json:"-"` - // Status - READ-ONLY; The status of blob restore progress. Possible values are: - InProgress: Indicates that blob restore is ongoing. - Complete: Indicates that blob restore has been completed successfully. - Failed: Indicates that blob restore is failed. Possible values include: 'BlobRestoreProgressStatusInProgress', 'BlobRestoreProgressStatusComplete', 'BlobRestoreProgressStatusFailed' - Status BlobRestoreProgressStatus `json:"status,omitempty"` - // FailureReason - READ-ONLY; Failure reason when blob restore is failed. - FailureReason *string `json:"failureReason,omitempty"` - // RestoreID - READ-ONLY; Id for tracking blob restore request. - RestoreID *string `json:"restoreId,omitempty"` - // Parameters - READ-ONLY; Blob restore request parameters. - Parameters *BlobRestoreParameters `json:"parameters,omitempty"` -} - -// MarshalJSON is the custom marshaler for BlobRestoreStatus. -func (brs BlobRestoreStatus) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BlobServiceItems ... -type BlobServiceItems struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of blob services returned. - Value *[]BlobServiceProperties `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BlobServiceItems. -func (bsi BlobServiceItems) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BlobServiceProperties the properties of a storage account’s Blob service. -type BlobServiceProperties struct { - autorest.Response `json:"-"` - // BlobServicePropertiesProperties - The properties of a storage account’s Blob service. 
- *BlobServicePropertiesProperties `json:"properties,omitempty"` - // Sku - READ-ONLY; Sku name and tier. - Sku *Sku `json:"sku,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for BlobServiceProperties. -func (bsp BlobServiceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if bsp.BlobServicePropertiesProperties != nil { - objectMap["properties"] = bsp.BlobServicePropertiesProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for BlobServiceProperties struct. -func (bsp *BlobServiceProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var blobServiceProperties BlobServicePropertiesProperties - err = json.Unmarshal(*v, &blobServiceProperties) - if err != nil { - return err - } - bsp.BlobServicePropertiesProperties = &blobServiceProperties - } - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - bsp.Sku = &sku - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - bsp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - bsp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - bsp.Type = &typeVar - } - } - } - - return nil -} - -// BlobServicePropertiesProperties the properties of a storage account’s Blob service. -type BlobServicePropertiesProperties struct { - // Cors - Specifies CORS rules for the Blob service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Blob service. - Cors *CorsRules `json:"cors,omitempty"` - // DefaultServiceVersion - DefaultServiceVersion indicates the default version to use for requests to the Blob service if an incoming request’s version is not specified. Possible values include version 2008-10-27 and all more recent versions. - DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty"` - // DeleteRetentionPolicy - The blob service properties for blob soft delete. - DeleteRetentionPolicy *DeleteRetentionPolicy `json:"deleteRetentionPolicy,omitempty"` - // IsVersioningEnabled - Versioning is enabled if set to true. - IsVersioningEnabled *bool `json:"isVersioningEnabled,omitempty"` - // AutomaticSnapshotPolicyEnabled - Deprecated in favor of isVersioningEnabled property. - AutomaticSnapshotPolicyEnabled *bool `json:"automaticSnapshotPolicyEnabled,omitempty"` - // ChangeFeed - The blob service properties for change feed events. - ChangeFeed *ChangeFeed `json:"changeFeed,omitempty"` - // RestorePolicy - The blob service properties for blob restore policy. 
- RestorePolicy *RestorePolicyProperties `json:"restorePolicy,omitempty"` - // ContainerDeleteRetentionPolicy - The blob service properties for container soft delete. - ContainerDeleteRetentionPolicy *DeleteRetentionPolicy `json:"containerDeleteRetentionPolicy,omitempty"` - // LastAccessTimeTrackingPolicy - The blob service property to configure last access time based tracking policy. - LastAccessTimeTrackingPolicy *LastAccessTimeTrackingPolicy `json:"lastAccessTimeTrackingPolicy,omitempty"` -} - -// ChangeFeed the blob service properties for change feed events. -type ChangeFeed struct { - // Enabled - Indicates whether change feed event logging is enabled for the Blob service. - Enabled *bool `json:"enabled,omitempty"` - // RetentionInDays - Indicates the duration of changeFeed retention in days. Minimum value is 1 day and maximum value is 146000 days (400 years). A null value indicates an infinite retention of the change feed. - RetentionInDays *int32 `json:"retentionInDays,omitempty"` -} - -// CheckNameAvailabilityResult the CheckNameAvailability operation response. -type CheckNameAvailabilityResult struct { - autorest.Response `json:"-"` - // NameAvailable - READ-ONLY; Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used. - NameAvailable *bool `json:"nameAvailable,omitempty"` - // Reason - READ-ONLY; Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'ReasonAccountNameInvalid', 'ReasonAlreadyExists' - Reason Reason `json:"reason,omitempty"` - // Message - READ-ONLY; Gets an error message explaining the Reason value in more detail. - Message *string `json:"message,omitempty"` -} - -// MarshalJSON is the custom marshaler for CheckNameAvailabilityResult. -func (cnar CheckNameAvailabilityResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CloudError an error response from the Storage service. -type CloudError struct { - Error *CloudErrorBody `json:"error,omitempty"` -} - -// CloudErrorBody an error response from the Storage service. -type CloudErrorBody struct { - // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. - Code *string `json:"code,omitempty"` - // Message - A message describing the error, intended to be suitable for display in a user interface. - Message *string `json:"message,omitempty"` - // Target - The target of the particular error. For example, the name of the property in error. - Target *string `json:"target,omitempty"` - // Details - A list of additional details about the error. - Details *[]CloudErrorBody `json:"details,omitempty"` -} - -// ContainerProperties the properties of a container. -type ContainerProperties struct { - // Version - READ-ONLY; The version of the deleted blob container. - Version *string `json:"version,omitempty"` - // Deleted - READ-ONLY; Indicates whether the blob container was deleted. - Deleted *bool `json:"deleted,omitempty"` - // DeletedTime - READ-ONLY; Blob container deletion time. - DeletedTime *date.Time `json:"deletedTime,omitempty"` - // RemainingRetentionDays - READ-ONLY; Remaining retention days for soft deleted blob container. 
- RemainingRetentionDays *int32 `json:"remainingRetentionDays,omitempty"` - // DefaultEncryptionScope - Default the container to use specified encryption scope for all writes. - DefaultEncryptionScope *string `json:"defaultEncryptionScope,omitempty"` - // DenyEncryptionScopeOverride - Block override of encryption scope from the container default. - DenyEncryptionScopeOverride *bool `json:"denyEncryptionScopeOverride,omitempty"` - // PublicAccess - Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' - PublicAccess PublicAccess `json:"publicAccess,omitempty"` - // LastModifiedTime - READ-ONLY; Returns the date and time the container was last modified. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // LeaseStatus - READ-ONLY; The lease status of the container. Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked' - LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"` - // LeaseState - READ-ONLY; Lease state of the container. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken' - LeaseState LeaseState `json:"leaseState,omitempty"` - // LeaseDuration - READ-ONLY; Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed' - LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"` - // Metadata - A name-value pair to associate with the container as metadata. - Metadata map[string]*string `json:"metadata"` - // ImmutabilityPolicy - READ-ONLY; The ImmutabilityPolicy property of the container. - ImmutabilityPolicy *ImmutabilityPolicyProperties `json:"immutabilityPolicy,omitempty"` - // LegalHold - READ-ONLY; The LegalHold property of the container. - LegalHold *LegalHoldProperties `json:"legalHold,omitempty"` - // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there are at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account. - HasLegalHold *bool `json:"hasLegalHold,omitempty"` - // HasImmutabilityPolicy - READ-ONLY; The hasImmutabilityPolicy public property is set to true by SRP if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public property is set to false by SRP if ImmutabilityPolicy has not been created for this container. - HasImmutabilityPolicy *bool `json:"hasImmutabilityPolicy,omitempty"` -} - -// MarshalJSON is the custom marshaler for ContainerProperties. -func (cp ContainerProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cp.DefaultEncryptionScope != nil { - objectMap["defaultEncryptionScope"] = cp.DefaultEncryptionScope - } - if cp.DenyEncryptionScopeOverride != nil { - objectMap["denyEncryptionScopeOverride"] = cp.DenyEncryptionScopeOverride - } - if cp.PublicAccess != "" { - objectMap["publicAccess"] = cp.PublicAccess - } - if cp.Metadata != nil { - objectMap["metadata"] = cp.Metadata - } - return json.Marshal(objectMap) -} - -// CorsRule specifies a CORS rule for the Blob service. -type CorsRule struct { - // AllowedOrigins - Required if CorsRule element is present. 
A list of origin domains that will be allowed via CORS, or "*" to allow all domains - AllowedOrigins *[]string `json:"allowedOrigins,omitempty"` - // AllowedMethods - Required if CorsRule element is present. A list of HTTP methods that are allowed to be executed by the origin. - AllowedMethods *[]string `json:"allowedMethods,omitempty"` - // MaxAgeInSeconds - Required if CorsRule element is present. The number of seconds that the client/browser should cache a preflight response. - MaxAgeInSeconds *int32 `json:"maxAgeInSeconds,omitempty"` - // ExposedHeaders - Required if CorsRule element is present. A list of response headers to expose to CORS clients. - ExposedHeaders *[]string `json:"exposedHeaders,omitempty"` - // AllowedHeaders - Required if CorsRule element is present. A list of headers allowed to be part of the cross-origin request. - AllowedHeaders *[]string `json:"allowedHeaders,omitempty"` -} - -// CorsRules sets the CORS rules. You can include up to five CorsRule elements in the request. -type CorsRules struct { - // CorsRules - The List of CORS rules. You can include up to five CorsRule elements in the request. - CorsRules *[]CorsRule `json:"corsRules,omitempty"` -} - -// CustomDomain the custom domain assigned to this storage account. This can be set via Update. -type CustomDomain struct { - // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source. - Name *string `json:"name,omitempty"` - // UseSubDomainName - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates. - UseSubDomainName *bool `json:"useSubDomainName,omitempty"` -} - -// DateAfterCreation object to define the number of days after creation. -type DateAfterCreation struct { - // DaysAfterCreationGreaterThan - Value indicating the age in days after creation - DaysAfterCreationGreaterThan *float64 `json:"daysAfterCreationGreaterThan,omitempty"` -} - -// DateAfterModification object to define the number of days after object last modification Or last access. -// Properties daysAfterModificationGreaterThan and daysAfterLastAccessTimeGreaterThan are mutually -// exclusive. -type DateAfterModification struct { - // DaysAfterModificationGreaterThan - Value indicating the age in days after last modification - DaysAfterModificationGreaterThan *float64 `json:"daysAfterModificationGreaterThan,omitempty"` - // DaysAfterLastAccessTimeGreaterThan - Value indicating the age in days after last blob access. This property can only be used in conjunction with last access time tracking policy - DaysAfterLastAccessTimeGreaterThan *float64 `json:"daysAfterLastAccessTimeGreaterThan,omitempty"` -} - -// DeletedAccount deleted storage account -type DeletedAccount struct { - autorest.Response `json:"-"` - // DeletedAccountProperties - Properties of the deleted account. - *DeletedAccountProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedAccount. 
-func (da DeletedAccount) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if da.DeletedAccountProperties != nil { - objectMap["properties"] = da.DeletedAccountProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DeletedAccount struct. -func (da *DeletedAccount) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var deletedAccountProperties DeletedAccountProperties - err = json.Unmarshal(*v, &deletedAccountProperties) - if err != nil { - return err - } - da.DeletedAccountProperties = &deletedAccountProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - da.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - da.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - da.Type = &typeVar - } - } - } - - return nil -} - -// DeletedAccountListResult the response from the List Deleted Accounts operation. -type DeletedAccountListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; Gets the list of deleted accounts and their properties. - Value *[]DeletedAccount `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to query next page of deleted accounts. Returned when total number of requested deleted accounts exceed maximum page size. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedAccountListResult. -func (dalr DeletedAccountListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedAccountListResultIterator provides access to a complete listing of DeletedAccount values. -type DeletedAccountListResultIterator struct { - i int - page DeletedAccountListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedAccountListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedAccountListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedAccountListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter DeletedAccountListResultIterator) Response() DeletedAccountListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedAccountListResultIterator) Value() DeletedAccount { - if !iter.page.NotDone() { - return DeletedAccount{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedAccountListResultIterator type. -func NewDeletedAccountListResultIterator(page DeletedAccountListResultPage) DeletedAccountListResultIterator { - return DeletedAccountListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dalr DeletedAccountListResult) IsEmpty() bool { - return dalr.Value == nil || len(*dalr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dalr DeletedAccountListResult) hasNextLink() bool { - return dalr.NextLink != nil && len(*dalr.NextLink) != 0 -} - -// deletedAccountListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dalr DeletedAccountListResult) deletedAccountListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dalr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dalr.NextLink))) -} - -// DeletedAccountListResultPage contains a page of DeletedAccount values. -type DeletedAccountListResultPage struct { - fn func(context.Context, DeletedAccountListResult) (DeletedAccountListResult, error) - dalr DeletedAccountListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedAccountListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedAccountListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dalr) - if err != nil { - return err - } - page.dalr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedAccountListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedAccountListResultPage) NotDone() bool { - return !page.dalr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedAccountListResultPage) Response() DeletedAccountListResult { - return page.dalr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedAccountListResultPage) Values() []DeletedAccount { - if page.dalr.IsEmpty() { - return nil - } - return *page.dalr.Value -} - -// Creates a new instance of the DeletedAccountListResultPage type. 
-func NewDeletedAccountListResultPage(cur DeletedAccountListResult, getNextPage func(context.Context, DeletedAccountListResult) (DeletedAccountListResult, error)) DeletedAccountListResultPage { - return DeletedAccountListResultPage{ - fn: getNextPage, - dalr: cur, - } -} - -// DeletedAccountProperties attributes of a deleted storage account. -type DeletedAccountProperties struct { - // StorageAccountResourceID - READ-ONLY; Full resource id of the original storage account. - StorageAccountResourceID *string `json:"storageAccountResourceId,omitempty"` - // Location - READ-ONLY; Location of the deleted account. - Location *string `json:"location,omitempty"` - // RestoreReference - READ-ONLY; Can be used to attempt recovering this deleted account via PutStorageAccount API. - RestoreReference *string `json:"restoreReference,omitempty"` - // CreationTime - READ-ONLY; Creation time of the deleted account. - CreationTime *string `json:"creationTime,omitempty"` - // DeletionTime - READ-ONLY; Deletion time of the deleted account. - DeletionTime *string `json:"deletionTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedAccountProperties. -func (dap DeletedAccountProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedShare the deleted share to be restored. -type DeletedShare struct { - // DeletedShareName - Required. Identify the name of the deleted share that will be restored. - DeletedShareName *string `json:"deletedShareName,omitempty"` - // DeletedShareVersion - Required. Identify the version of the deleted share that will be restored. - DeletedShareVersion *string `json:"deletedShareVersion,omitempty"` -} - -// DeleteRetentionPolicy the service properties for soft delete. -type DeleteRetentionPolicy struct { - // Enabled - Indicates whether DeleteRetentionPolicy is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Days - Indicates the number of days that the deleted item should be retained. The minimum specified value can be 1 and the maximum value can be 365. - Days *int32 `json:"days,omitempty"` -} - -// Dimension dimension of blobs, possibly be blob type or access tier. -type Dimension struct { - // Name - Display name of dimension. - Name *string `json:"name,omitempty"` - // DisplayName - Display name of dimension. - DisplayName *string `json:"displayName,omitempty"` -} - -// Encryption the encryption settings on the storage account. -type Encryption struct { - // Services - List of services which support encryption. - Services *EncryptionServices `json:"services,omitempty"` - // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include: 'KeySourceMicrosoftStorage', 'KeySourceMicrosoftKeyvault' - KeySource KeySource `json:"keySource,omitempty"` - // RequireInfrastructureEncryption - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. - RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"` - // KeyVaultProperties - Properties provided by key vault. - KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"` - // EncryptionIdentity - The identity to be used with service-side encryption at rest. - EncryptionIdentity *EncryptionIdentity `json:"identity,omitempty"` -} - -// EncryptionIdentity encryption identity for the storage account. 
-type EncryptionIdentity struct { - // EncryptionUserAssignedIdentity - Resource identifier of the UserAssigned identity to be associated with server-side encryption on the storage account. - EncryptionUserAssignedIdentity *string `json:"userAssignedIdentity,omitempty"` -} - -// EncryptionScope the Encryption Scope resource. -type EncryptionScope struct { - autorest.Response `json:"-"` - // EncryptionScopeProperties - Properties of the encryption scope. - *EncryptionScopeProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for EncryptionScope. -func (es EncryptionScope) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if es.EncryptionScopeProperties != nil { - objectMap["properties"] = es.EncryptionScopeProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for EncryptionScope struct. -func (es *EncryptionScope) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var encryptionScopeProperties EncryptionScopeProperties - err = json.Unmarshal(*v, &encryptionScopeProperties) - if err != nil { - return err - } - es.EncryptionScopeProperties = &encryptionScopeProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - es.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - es.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - es.Type = &typeVar - } - } - } - - return nil -} - -// EncryptionScopeKeyVaultProperties the key vault properties for the encryption scope. This is a required -// field if encryption scope 'source' attribute is set to 'Microsoft.KeyVault'. -type EncryptionScopeKeyVaultProperties struct { - // KeyURI - The object identifier for a key vault key object. When applied, the encryption scope will use the key referenced by the identifier to enable customer-managed key support on this encryption scope. - KeyURI *string `json:"keyUri,omitempty"` - // CurrentVersionedKeyIdentifier - READ-ONLY; The object identifier of the current versioned Key Vault Key in use. - CurrentVersionedKeyIdentifier *string `json:"currentVersionedKeyIdentifier,omitempty"` - // LastKeyRotationTimestamp - READ-ONLY; Timestamp of last rotation of the Key Vault Key. - LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"` -} - -// MarshalJSON is the custom marshaler for EncryptionScopeKeyVaultProperties. 
-func (eskvp EncryptionScopeKeyVaultProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if eskvp.KeyURI != nil { - objectMap["keyUri"] = eskvp.KeyURI - } - return json.Marshal(objectMap) -} - -// EncryptionScopeListResult list of encryption scopes requested, and if paging is required, a URL to the -// next page of encryption scopes. -type EncryptionScopeListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of encryption scopes requested. - Value *[]EncryptionScope `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to query next page of encryption scopes. Returned when total number of requested encryption scopes exceeds the maximum page size. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for EncryptionScopeListResult. -func (eslr EncryptionScopeListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// EncryptionScopeListResultIterator provides access to a complete listing of EncryptionScope values. -type EncryptionScopeListResultIterator struct { - i int - page EncryptionScopeListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *EncryptionScopeListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopeListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *EncryptionScopeListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter EncryptionScopeListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter EncryptionScopeListResultIterator) Response() EncryptionScopeListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter EncryptionScopeListResultIterator) Value() EncryptionScope { - if !iter.page.NotDone() { - return EncryptionScope{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the EncryptionScopeListResultIterator type. -func NewEncryptionScopeListResultIterator(page EncryptionScopeListResultPage) EncryptionScopeListResultIterator { - return EncryptionScopeListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (eslr EncryptionScopeListResult) IsEmpty() bool { - return eslr.Value == nil || len(*eslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. 
-func (eslr EncryptionScopeListResult) hasNextLink() bool { - return eslr.NextLink != nil && len(*eslr.NextLink) != 0 -} - -// encryptionScopeListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (eslr EncryptionScopeListResult) encryptionScopeListResultPreparer(ctx context.Context) (*http.Request, error) { - if !eslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(eslr.NextLink))) -} - -// EncryptionScopeListResultPage contains a page of EncryptionScope values. -type EncryptionScopeListResultPage struct { - fn func(context.Context, EncryptionScopeListResult) (EncryptionScopeListResult, error) - eslr EncryptionScopeListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *EncryptionScopeListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionScopeListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.eslr) - if err != nil { - return err - } - page.eslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *EncryptionScopeListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page EncryptionScopeListResultPage) NotDone() bool { - return !page.eslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page EncryptionScopeListResultPage) Response() EncryptionScopeListResult { - return page.eslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page EncryptionScopeListResultPage) Values() []EncryptionScope { - if page.eslr.IsEmpty() { - return nil - } - return *page.eslr.Value -} - -// Creates a new instance of the EncryptionScopeListResultPage type. -func NewEncryptionScopeListResultPage(cur EncryptionScopeListResult, getNextPage func(context.Context, EncryptionScopeListResult) (EncryptionScopeListResult, error)) EncryptionScopeListResultPage { - return EncryptionScopeListResultPage{ - fn: getNextPage, - eslr: cur, - } -} - -// EncryptionScopeProperties properties of the encryption scope. -type EncryptionScopeProperties struct { - // Source - The provider for the encryption scope. Possible values (case-insensitive): Microsoft.Storage, Microsoft.KeyVault. Possible values include: 'EncryptionScopeSourceMicrosoftStorage', 'EncryptionScopeSourceMicrosoftKeyVault' - Source EncryptionScopeSource `json:"source,omitempty"` - // State - The state of the encryption scope. Possible values (case-insensitive): Enabled, Disabled. Possible values include: 'EncryptionScopeStateEnabled', 'EncryptionScopeStateDisabled' - State EncryptionScopeState `json:"state,omitempty"` - // CreationTime - READ-ONLY; Gets the creation date and time of the encryption scope in UTC. 
- CreationTime *date.Time `json:"creationTime,omitempty"` - // LastModifiedTime - READ-ONLY; Gets the last modification date and time of the encryption scope in UTC. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // KeyVaultProperties - The key vault properties for the encryption scope. This is a required field if encryption scope 'source' attribute is set to 'Microsoft.KeyVault'. - KeyVaultProperties *EncryptionScopeKeyVaultProperties `json:"keyVaultProperties,omitempty"` - // RequireInfrastructureEncryption - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. - RequireInfrastructureEncryption *bool `json:"requireInfrastructureEncryption,omitempty"` -} - -// MarshalJSON is the custom marshaler for EncryptionScopeProperties. -func (esp EncryptionScopeProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if esp.Source != "" { - objectMap["source"] = esp.Source - } - if esp.State != "" { - objectMap["state"] = esp.State - } - if esp.KeyVaultProperties != nil { - objectMap["keyVaultProperties"] = esp.KeyVaultProperties - } - if esp.RequireInfrastructureEncryption != nil { - objectMap["requireInfrastructureEncryption"] = esp.RequireInfrastructureEncryption - } - return json.Marshal(objectMap) -} - -// EncryptionService a service that allows server-side encryption to be used. -type EncryptionService struct { - // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored. - Enabled *bool `json:"enabled,omitempty"` - // LastEnabledTime - READ-ONLY; Gets a rough estimate of the date/time when the encryption was last enabled by the user. Only returned when encryption is enabled. There might be some unencrypted blobs which were written after this time, as it is just a rough estimate. - LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` - // KeyType - Encryption key type to be used for the encryption service. 'Account' key type implies that an account-scoped encryption key will be used. 'Service' key type implies that a default service key is used. Possible values include: 'KeyTypeService', 'KeyTypeAccount' - KeyType KeyType `json:"keyType,omitempty"` -} - -// MarshalJSON is the custom marshaler for EncryptionService. -func (es EncryptionService) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if es.Enabled != nil { - objectMap["enabled"] = es.Enabled - } - if es.KeyType != "" { - objectMap["keyType"] = es.KeyType - } - return json.Marshal(objectMap) -} - -// EncryptionServices a list of services that support encryption. -type EncryptionServices struct { - // Blob - The encryption function of the blob storage service. - Blob *EncryptionService `json:"blob,omitempty"` - // File - The encryption function of the file storage service. - File *EncryptionService `json:"file,omitempty"` - // Table - The encryption function of the table storage service. - Table *EncryptionService `json:"table,omitempty"` - // Queue - The encryption function of the queue storage service. - Queue *EncryptionService `json:"queue,omitempty"` -} - -// Endpoints the URIs that are used to perform a retrieval of a public blob, queue, table, web or dfs -// object. -type Endpoints struct { - // Blob - READ-ONLY; Gets the blob endpoint. - Blob *string `json:"blob,omitempty"` - // Queue - READ-ONLY; Gets the queue endpoint. - Queue *string `json:"queue,omitempty"` - // Table - READ-ONLY; Gets the table endpoint. 
- Table *string `json:"table,omitempty"` - // File - READ-ONLY; Gets the file endpoint. - File *string `json:"file,omitempty"` - // Web - READ-ONLY; Gets the web endpoint. - Web *string `json:"web,omitempty"` - // Dfs - READ-ONLY; Gets the dfs endpoint. - Dfs *string `json:"dfs,omitempty"` - // MicrosoftEndpoints - Gets the microsoft routing storage endpoints. - MicrosoftEndpoints *AccountMicrosoftEndpoints `json:"microsoftEndpoints,omitempty"` - // InternetEndpoints - Gets the internet routing storage endpoints - InternetEndpoints *AccountInternetEndpoints `json:"internetEndpoints,omitempty"` -} - -// MarshalJSON is the custom marshaler for Endpoints. -func (e Endpoints) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if e.MicrosoftEndpoints != nil { - objectMap["microsoftEndpoints"] = e.MicrosoftEndpoints - } - if e.InternetEndpoints != nil { - objectMap["internetEndpoints"] = e.InternetEndpoints - } - return json.Marshal(objectMap) -} - -// ErrorResponse an error response from the storage resource provider. -type ErrorResponse struct { - // Error - Azure Storage Resource Provider error response body. - Error *ErrorResponseBody `json:"error,omitempty"` -} - -// ErrorResponseBody error response body contract. -type ErrorResponseBody struct { - // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. - Code *string `json:"code,omitempty"` - // Message - A message describing the error, intended to be suitable for display in a user interface. - Message *string `json:"message,omitempty"` -} - -// ExtendedLocation the complex type of the extended location. -type ExtendedLocation struct { - // Name - The name of the extended location. - Name *string `json:"name,omitempty"` - // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone' - Type ExtendedLocationTypes `json:"type,omitempty"` -} - -// FileServiceItems ... -type FileServiceItems struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of file services returned. - Value *[]FileServiceProperties `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for FileServiceItems. -func (fsi FileServiceItems) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// FileServiceProperties the properties of File services in storage account. -type FileServiceProperties struct { - autorest.Response `json:"-"` - // FileServicePropertiesProperties - The properties of File services in storage account. - *FileServicePropertiesProperties `json:"properties,omitempty"` - // Sku - READ-ONLY; Sku name and tier. - Sku *Sku `json:"sku,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for FileServiceProperties. 
-func (fsp FileServiceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if fsp.FileServicePropertiesProperties != nil { - objectMap["properties"] = fsp.FileServicePropertiesProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for FileServiceProperties struct. -func (fsp *FileServiceProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var fileServiceProperties FileServicePropertiesProperties - err = json.Unmarshal(*v, &fileServiceProperties) - if err != nil { - return err - } - fsp.FileServicePropertiesProperties = &fileServiceProperties - } - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - fsp.Sku = &sku - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - fsp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - fsp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - fsp.Type = &typeVar - } - } - } - - return nil -} - -// FileServicePropertiesProperties the properties of File services in storage account. -type FileServicePropertiesProperties struct { - // Cors - Specifies CORS rules for the File service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the File service. - Cors *CorsRules `json:"cors,omitempty"` - // ShareDeleteRetentionPolicy - The file service properties for share soft delete. - ShareDeleteRetentionPolicy *DeleteRetentionPolicy `json:"shareDeleteRetentionPolicy,omitempty"` - // ProtocolSettings - Protocol settings for file service - ProtocolSettings *ProtocolSettings `json:"protocolSettings,omitempty"` -} - -// FileShare properties of the file share, including Id, resource name, resource type, Etag. -type FileShare struct { - autorest.Response `json:"-"` - // FileShareProperties - Properties of the file share. - *FileShareProperties `json:"properties,omitempty"` - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for FileShare. -func (fs FileShare) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if fs.FileShareProperties != nil { - objectMap["properties"] = fs.FileShareProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for FileShare struct. 
-func (fs *FileShare) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var fileShareProperties FileShareProperties - err = json.Unmarshal(*v, &fileShareProperties) - if err != nil { - return err - } - fs.FileShareProperties = &fileShareProperties - } - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - fs.Etag = &etag - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - fs.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - fs.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - fs.Type = &typeVar - } - } - } - - return nil -} - -// FileShareItem the file share properties be listed out. -type FileShareItem struct { - // FileShareProperties - The file share properties be listed out. - *FileShareProperties `json:"properties,omitempty"` - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for FileShareItem. -func (fsi FileShareItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if fsi.FileShareProperties != nil { - objectMap["properties"] = fsi.FileShareProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for FileShareItem struct. -func (fsi *FileShareItem) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var fileShareProperties FileShareProperties - err = json.Unmarshal(*v, &fileShareProperties) - if err != nil { - return err - } - fsi.FileShareProperties = &fileShareProperties - } - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - fsi.Etag = &etag - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - fsi.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - fsi.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - fsi.Type = &typeVar - } - } - } - - return nil -} - -// FileShareItems response schema. Contains list of shares returned, and if paging is requested or -// required, a URL to next page of shares. -type FileShareItems struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of file shares returned. 
- Value *[]FileShareItem `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to query next page of shares. Returned when total number of requested shares exceed maximum page size. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for FileShareItems. -func (fsi FileShareItems) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// FileShareItemsIterator provides access to a complete listing of FileShareItem values. -type FileShareItemsIterator struct { - i int - page FileShareItemsPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *FileShareItemsIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *FileShareItemsIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter FileShareItemsIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter FileShareItemsIterator) Response() FileShareItems { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter FileShareItemsIterator) Value() FileShareItem { - if !iter.page.NotDone() { - return FileShareItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the FileShareItemsIterator type. -func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator { - return FileShareItemsIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (fsi FileShareItems) IsEmpty() bool { - return fsi.Value == nil || len(*fsi.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (fsi FileShareItems) hasNextLink() bool { - return fsi.NextLink != nil && len(*fsi.NextLink) != 0 -} - -// fileShareItemsPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (fsi FileShareItems) fileShareItemsPreparer(ctx context.Context) (*http.Request, error) { - if !fsi.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(fsi.NextLink))) -} - -// FileShareItemsPage contains a page of FileShareItem values. -type FileShareItemsPage struct { - fn func(context.Context, FileShareItems) (FileShareItems, error) - fsi FileShareItems -} - -// NextWithContext advances to the next page of values. 
If there was an error making -// the request the page does not advance and the error is returned. -func (page *FileShareItemsPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.fsi) - if err != nil { - return err - } - page.fsi = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *FileShareItemsPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page FileShareItemsPage) NotDone() bool { - return !page.fsi.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page FileShareItemsPage) Response() FileShareItems { - return page.fsi -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page FileShareItemsPage) Values() []FileShareItem { - if page.fsi.IsEmpty() { - return nil - } - return *page.fsi.Value -} - -// Creates a new instance of the FileShareItemsPage type. -func NewFileShareItemsPage(cur FileShareItems, getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage { - return FileShareItemsPage{ - fn: getNextPage, - fsi: cur, - } -} - -// FileShareProperties the properties of the file share. -type FileShareProperties struct { - // LastModifiedTime - READ-ONLY; Returns the date and time the share was last modified. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Metadata - A name-value pair to associate with the share as metadata. - Metadata map[string]*string `json:"metadata"` - // ShareQuota - The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). For Large File Shares, the maximum size is 102400. - ShareQuota *int32 `json:"shareQuota,omitempty"` - // EnabledProtocols - The authentication protocol that is used for the file share. Can only be specified when creating a share. Possible values include: 'EnabledProtocolsSMB', 'EnabledProtocolsNFS' - EnabledProtocols EnabledProtocols `json:"enabledProtocols,omitempty"` - // RootSquash - The property is for NFS share only. The default is NoRootSquash. Possible values include: 'RootSquashTypeNoRootSquash', 'RootSquashTypeRootSquash', 'RootSquashTypeAllSquash' - RootSquash RootSquashType `json:"rootSquash,omitempty"` - // Version - READ-ONLY; The version of the share. - Version *string `json:"version,omitempty"` - // Deleted - READ-ONLY; Indicates whether the share was deleted. - Deleted *bool `json:"deleted,omitempty"` - // DeletedTime - READ-ONLY; The deleted time if the share was deleted. - DeletedTime *date.Time `json:"deletedTime,omitempty"` - // RemainingRetentionDays - READ-ONLY; Remaining retention days for share that was soft deleted. - RemainingRetentionDays *int32 `json:"remainingRetentionDays,omitempty"` - // AccessTier - Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. 
FileStorage account can choose Premium. Possible values include: 'ShareAccessTierTransactionOptimized', 'ShareAccessTierHot', 'ShareAccessTierCool', 'ShareAccessTierPremium' - AccessTier ShareAccessTier `json:"accessTier,omitempty"` - // AccessTierChangeTime - READ-ONLY; Indicates the last modification time for share access tier. - AccessTierChangeTime *date.Time `json:"accessTierChangeTime,omitempty"` - // AccessTierStatus - READ-ONLY; Indicates if there is a pending transition for access tier. - AccessTierStatus *string `json:"accessTierStatus,omitempty"` - // ShareUsageBytes - READ-ONLY; The approximate size of the data stored on the share. Note that this value may not include all recently created or recently resized files. - ShareUsageBytes *int64 `json:"shareUsageBytes,omitempty"` - // SnapshotTime - READ-ONLY; Creation time of share snapshot returned in the response of list shares with expand param "snapshots". - SnapshotTime *date.Time `json:"snapshotTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for FileShareProperties. -func (fsp FileShareProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if fsp.Metadata != nil { - objectMap["metadata"] = fsp.Metadata - } - if fsp.ShareQuota != nil { - objectMap["shareQuota"] = fsp.ShareQuota - } - if fsp.EnabledProtocols != "" { - objectMap["enabledProtocols"] = fsp.EnabledProtocols - } - if fsp.RootSquash != "" { - objectMap["rootSquash"] = fsp.RootSquash - } - if fsp.AccessTier != "" { - objectMap["accessTier"] = fsp.AccessTier - } - return json.Marshal(objectMap) -} - -// GeoReplicationStats statistics related to replication for storage account's Blob, Table, Queue and File -// services. It is only available when geo-redundant replication is enabled for the storage account. -type GeoReplicationStats struct { - // Status - READ-ONLY; The status of the secondary location. Possible values are: - Live: Indicates that the secondary location is active and operational. - Bootstrap: Indicates initial synchronization from the primary location to the secondary location is in progress.This typically occurs when replication is first enabled. - Unavailable: Indicates that the secondary location is temporarily unavailable. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable' - Status GeoReplicationStatus `json:"status,omitempty"` - // LastSyncTime - READ-ONLY; All primary writes preceding this UTC date/time value are guaranteed to be available for read operations. Primary writes following this point in time may or may not be available for reads. Element may be default value if value of LastSyncTime is not available, this can happen if secondary is offline or we are in bootstrap. - LastSyncTime *date.Time `json:"lastSyncTime,omitempty"` - // CanFailover - READ-ONLY; A boolean flag which indicates whether or not account failover is supported for the account. - CanFailover *bool `json:"canFailover,omitempty"` -} - -// MarshalJSON is the custom marshaler for GeoReplicationStats. -func (grs GeoReplicationStats) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Identity identity for the resource. -type Identity struct { - // PrincipalID - READ-ONLY; The principal ID of resource identity. - PrincipalID *string `json:"principalId,omitempty"` - // TenantID - READ-ONLY; The tenant ID of resource. - TenantID *string `json:"tenantId,omitempty"` - // Type - The identity type. 
Possible values include: 'IdentityTypeNone', 'IdentityTypeSystemAssigned', 'IdentityTypeUserAssigned', 'IdentityTypeSystemAssignedUserAssigned' - Type IdentityType `json:"type,omitempty"` - // UserAssignedIdentities - Gets or sets a list of key value pairs that describe the set of User Assigned identities that will be used with this storage account. The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is permitted here. - UserAssignedIdentities map[string]*UserAssignedIdentity `json:"userAssignedIdentities"` -} - -// MarshalJSON is the custom marshaler for Identity. -func (i Identity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if i.Type != "" { - objectMap["type"] = i.Type - } - if i.UserAssignedIdentities != nil { - objectMap["userAssignedIdentities"] = i.UserAssignedIdentities - } - return json.Marshal(objectMap) -} - -// ImmutabilityPolicy the ImmutabilityPolicy property of a blob container, including Id, resource name, -// resource type, Etag. -type ImmutabilityPolicy struct { - autorest.Response `json:"-"` - // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container. - *ImmutabilityPolicyProperty `json:"properties,omitempty"` - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ImmutabilityPolicy. -func (IP ImmutabilityPolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if IP.ImmutabilityPolicyProperty != nil { - objectMap["properties"] = IP.ImmutabilityPolicyProperty - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicy struct. -func (IP *ImmutabilityPolicy) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var immutabilityPolicyProperty ImmutabilityPolicyProperty - err = json.Unmarshal(*v, &immutabilityPolicyProperty) - if err != nil { - return err - } - IP.ImmutabilityPolicyProperty = &immutabilityPolicyProperty - } - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - IP.Etag = &etag - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - IP.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - IP.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - IP.Type = &typeVar - } - } - } - - return nil -} - -// ImmutabilityPolicyProperties the properties of an ImmutabilityPolicy of a blob container. -type ImmutabilityPolicyProperties struct { - // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container. 
- *ImmutabilityPolicyProperty `json:"properties,omitempty"` - // Etag - READ-ONLY; ImmutabilityPolicy Etag. - Etag *string `json:"etag,omitempty"` - // UpdateHistory - READ-ONLY; The ImmutabilityPolicy update history of the blob container. - UpdateHistory *[]UpdateHistoryProperty `json:"updateHistory,omitempty"` -} - -// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperties. -func (ipp ImmutabilityPolicyProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ipp.ImmutabilityPolicyProperty != nil { - objectMap["properties"] = ipp.ImmutabilityPolicyProperty - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicyProperties struct. -func (ipp *ImmutabilityPolicyProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var immutabilityPolicyProperty ImmutabilityPolicyProperty - err = json.Unmarshal(*v, &immutabilityPolicyProperty) - if err != nil { - return err - } - ipp.ImmutabilityPolicyProperty = &immutabilityPolicyProperty - } - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - ipp.Etag = &etag - } - case "updateHistory": - if v != nil { - var updateHistory []UpdateHistoryProperty - err = json.Unmarshal(*v, &updateHistory) - if err != nil { - return err - } - ipp.UpdateHistory = &updateHistory - } - } - } - - return nil -} - -// ImmutabilityPolicyProperty the properties of an ImmutabilityPolicy of a blob container. -type ImmutabilityPolicyProperty struct { - // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days. - ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"` - // State - READ-ONLY; The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked. Possible values include: 'ImmutabilityPolicyStateLocked', 'ImmutabilityPolicyStateUnlocked' - State ImmutabilityPolicyState `json:"state,omitempty"` - // AllowProtectedAppendWrites - This property can only be changed for unlocked time-based retention policies. When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. This property cannot be changed with ExtendImmutabilityPolicy API - AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty"` -} - -// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperty. -func (ipp ImmutabilityPolicyProperty) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ipp.ImmutabilityPeriodSinceCreationInDays != nil { - objectMap["immutabilityPeriodSinceCreationInDays"] = ipp.ImmutabilityPeriodSinceCreationInDays - } - if ipp.AllowProtectedAppendWrites != nil { - objectMap["allowProtectedAppendWrites"] = ipp.AllowProtectedAppendWrites - } - return json.Marshal(objectMap) -} - -// IPRule IP rule with specific IP or IP range in CIDR format. -type IPRule struct { - // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed. - IPAddressOrRange *string `json:"value,omitempty"` - // Action - The action of IP ACL rule. 
Possible values include: 'ActionAllow' - Action Action `json:"action,omitempty"` -} - -// KeyCreationTime storage account keys creation time. -type KeyCreationTime struct { - Key1 *date.Time `json:"key1,omitempty"` - Key2 *date.Time `json:"key2,omitempty"` -} - -// KeyPolicy keyPolicy assigned to the storage account. -type KeyPolicy struct { - // KeyExpirationPeriodInDays - The key expiration period in days. - KeyExpirationPeriodInDays *int32 `json:"keyExpirationPeriodInDays,omitempty"` -} - -// KeyVaultProperties properties of key vault. -type KeyVaultProperties struct { - // KeyName - The name of KeyVault key. - KeyName *string `json:"keyname,omitempty"` - // KeyVersion - The version of KeyVault key. - KeyVersion *string `json:"keyversion,omitempty"` - // KeyVaultURI - The Uri of KeyVault. - KeyVaultURI *string `json:"keyvaulturi,omitempty"` - // CurrentVersionedKeyIdentifier - READ-ONLY; The object identifier of the current versioned Key Vault Key in use. - CurrentVersionedKeyIdentifier *string `json:"currentVersionedKeyIdentifier,omitempty"` - // LastKeyRotationTimestamp - READ-ONLY; Timestamp of last rotation of the Key Vault Key. - LastKeyRotationTimestamp *date.Time `json:"lastKeyRotationTimestamp,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyVaultProperties. -func (kvp KeyVaultProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kvp.KeyName != nil { - objectMap["keyname"] = kvp.KeyName - } - if kvp.KeyVersion != nil { - objectMap["keyversion"] = kvp.KeyVersion - } - if kvp.KeyVaultURI != nil { - objectMap["keyvaulturi"] = kvp.KeyVaultURI - } - return json.Marshal(objectMap) -} - -// LastAccessTimeTrackingPolicy the blob service properties for Last access time based tracking policy. -type LastAccessTimeTrackingPolicy struct { - // Enable - When set to true last access time based tracking is enabled. - Enable *bool `json:"enable,omitempty"` - // Name - Name of the policy. The valid value is AccessTimeTracking. This field is currently read only. Possible values include: 'NameAccessTimeTracking' - Name Name `json:"name,omitempty"` - // TrackingGranularityInDays - The field specifies blob object tracking granularity in days, typically how often the blob object should be tracked.This field is currently read only with value as 1 - TrackingGranularityInDays *int32 `json:"trackingGranularityInDays,omitempty"` - // BlobType - An array of predefined supported blob types. Only blockBlob is the supported value. This field is currently read only - BlobType *[]string `json:"blobType,omitempty"` -} - -// LeaseContainerRequest lease Container request schema. -type LeaseContainerRequest struct { - // Action - Specifies the lease action. Can be one of the available actions. Possible values include: 'Action1Acquire', 'Action1Renew', 'Action1Change', 'Action1Release', 'Action1Break' - Action Action1 `json:"action,omitempty"` - // LeaseID - Identifies the lease. Can be specified in any valid GUID string format. - LeaseID *string `json:"leaseId,omitempty"` - // BreakPeriod - Optional. For a break action, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. - BreakPeriod *int32 `json:"breakPeriod,omitempty"` - // LeaseDuration - Required for acquire. Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. - LeaseDuration *int32 `json:"leaseDuration,omitempty"` - // ProposedLeaseID - Optional for acquire, required for change. 
Proposed lease ID, in a GUID string format. - ProposedLeaseID *string `json:"proposedLeaseId,omitempty"` -} - -// LeaseContainerResponse lease Container response schema. -type LeaseContainerResponse struct { - autorest.Response `json:"-"` - // LeaseID - Returned unique lease ID that must be included with any request to delete the container, or to renew, change, or release the lease. - LeaseID *string `json:"leaseId,omitempty"` - // LeaseTimeSeconds - Approximate time remaining in the lease period, in seconds. - LeaseTimeSeconds *string `json:"leaseTimeSeconds,omitempty"` -} - -// LegalHold the LegalHold property of a blob container. -type LegalHold struct { - autorest.Response `json:"-"` - // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there are at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account. - HasLegalHold *bool `json:"hasLegalHold,omitempty"` - // Tags - Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case at SRP. - Tags *[]string `json:"tags,omitempty"` -} - -// MarshalJSON is the custom marshaler for LegalHold. -func (lh LegalHold) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lh.Tags != nil { - objectMap["tags"] = lh.Tags - } - return json.Marshal(objectMap) -} - -// LegalHoldProperties the LegalHold property of a blob container. -type LegalHoldProperties struct { - // HasLegalHold - READ-ONLY; The hasLegalHold public property is set to true by SRP if there are at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account. - HasLegalHold *bool `json:"hasLegalHold,omitempty"` - // Tags - The list of LegalHold tags of a blob container. - Tags *[]TagProperty `json:"tags,omitempty"` -} - -// MarshalJSON is the custom marshaler for LegalHoldProperties. -func (lhp LegalHoldProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lhp.Tags != nil { - objectMap["tags"] = lhp.Tags - } - return json.Marshal(objectMap) -} - -// ListAccountSasResponse the List SAS credentials operation response. -type ListAccountSasResponse struct { - autorest.Response `json:"-"` - // AccountSasToken - READ-ONLY; List SAS credentials of storage account. - AccountSasToken *string `json:"accountSasToken,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListAccountSasResponse. -func (lasr ListAccountSasResponse) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListBlobInventoryPolicy list of blob inventory policies returned. -type ListBlobInventoryPolicy struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of blob inventory policies. - Value *[]BlobInventoryPolicy `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListBlobInventoryPolicy. -func (lbip ListBlobInventoryPolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListContainerItem the blob container properties be listed out. -type ListContainerItem struct { - // ContainerProperties - The blob container properties be listed out. 
- *ContainerProperties `json:"properties,omitempty"` - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListContainerItem. -func (lci ListContainerItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lci.ContainerProperties != nil { - objectMap["properties"] = lci.ContainerProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ListContainerItem struct. -func (lci *ListContainerItem) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var containerProperties ContainerProperties - err = json.Unmarshal(*v, &containerProperties) - if err != nil { - return err - } - lci.ContainerProperties = &containerProperties - } - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - lci.Etag = &etag - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - lci.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - lci.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - lci.Type = &typeVar - } - } - } - - return nil -} - -// ListContainerItems response schema. Contains list of blobs returned, and if paging is requested or -// required, a URL to next page of containers. -type ListContainerItems struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of blobs containers returned. - Value *[]ListContainerItem `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to query next page of containers. Returned when total number of requested containers exceed maximum page size. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListContainerItems. -func (lci ListContainerItems) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListContainerItemsIterator provides access to a complete listing of ListContainerItem values. -type ListContainerItemsIterator struct { - i int - page ListContainerItemsPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *ListContainerItemsIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ListContainerItemsIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ListContainerItemsIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ListContainerItemsIterator) Response() ListContainerItems { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ListContainerItemsIterator) Value() ListContainerItem { - if !iter.page.NotDone() { - return ListContainerItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ListContainerItemsIterator type. -func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator { - return ListContainerItemsIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (lci ListContainerItems) IsEmpty() bool { - return lci.Value == nil || len(*lci.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (lci ListContainerItems) hasNextLink() bool { - return lci.NextLink != nil && len(*lci.NextLink) != 0 -} - -// listContainerItemsPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (lci ListContainerItems) listContainerItemsPreparer(ctx context.Context) (*http.Request, error) { - if !lci.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(lci.NextLink))) -} - -// ListContainerItemsPage contains a page of ListContainerItem values. -type ListContainerItemsPage struct { - fn func(context.Context, ListContainerItems) (ListContainerItems, error) - lci ListContainerItems -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ListContainerItemsPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListContainerItemsPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.lci) - if err != nil { - return err - } - page.lci = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. 
If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ListContainerItemsPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ListContainerItemsPage) NotDone() bool { - return !page.lci.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ListContainerItemsPage) Response() ListContainerItems { - return page.lci -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ListContainerItemsPage) Values() []ListContainerItem { - if page.lci.IsEmpty() { - return nil - } - return *page.lci.Value -} - -// Creates a new instance of the ListContainerItemsPage type. -func NewListContainerItemsPage(cur ListContainerItems, getNextPage func(context.Context, ListContainerItems) (ListContainerItems, error)) ListContainerItemsPage { - return ListContainerItemsPage{ - fn: getNextPage, - lci: cur, - } -} - -// ListQueue ... -type ListQueue struct { - // ListQueueProperties - List Queue resource properties. - *ListQueueProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListQueue. -func (lq ListQueue) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lq.ListQueueProperties != nil { - objectMap["properties"] = lq.ListQueueProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ListQueue struct. -func (lq *ListQueue) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var queueProperties ListQueueProperties - err = json.Unmarshal(*v, &queueProperties) - if err != nil { - return err - } - lq.ListQueueProperties = &queueProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - lq.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - lq.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - lq.Type = &typeVar - } - } - } - - return nil -} - -// ListQueueProperties ... -type ListQueueProperties struct { - // Metadata - A name-value pair that represents queue metadata. - Metadata map[string]*string `json:"metadata"` -} - -// MarshalJSON is the custom marshaler for ListQueueProperties. -func (lqp ListQueueProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lqp.Metadata != nil { - objectMap["metadata"] = lqp.Metadata - } - return json.Marshal(objectMap) -} - -// ListQueueResource response schema. 
Contains list of queues returned -type ListQueueResource struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of queues returned. - Value *[]ListQueue `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to list next page of queues - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListQueueResource. -func (lqr ListQueueResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListQueueResourceIterator provides access to a complete listing of ListQueue values. -type ListQueueResourceIterator struct { - i int - page ListQueueResourcePage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ListQueueResourceIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListQueueResourceIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ListQueueResourceIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ListQueueResourceIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ListQueueResourceIterator) Response() ListQueueResource { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ListQueueResourceIterator) Value() ListQueue { - if !iter.page.NotDone() { - return ListQueue{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ListQueueResourceIterator type. -func NewListQueueResourceIterator(page ListQueueResourcePage) ListQueueResourceIterator { - return ListQueueResourceIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (lqr ListQueueResource) IsEmpty() bool { - return lqr.Value == nil || len(*lqr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (lqr ListQueueResource) hasNextLink() bool { - return lqr.NextLink != nil && len(*lqr.NextLink) != 0 -} - -// listQueueResourcePreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (lqr ListQueueResource) listQueueResourcePreparer(ctx context.Context) (*http.Request, error) { - if !lqr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(lqr.NextLink))) -} - -// ListQueueResourcePage contains a page of ListQueue values. 
-type ListQueueResourcePage struct { - fn func(context.Context, ListQueueResource) (ListQueueResource, error) - lqr ListQueueResource -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ListQueueResourcePage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListQueueResourcePage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.lqr) - if err != nil { - return err - } - page.lqr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ListQueueResourcePage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ListQueueResourcePage) NotDone() bool { - return !page.lqr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ListQueueResourcePage) Response() ListQueueResource { - return page.lqr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ListQueueResourcePage) Values() []ListQueue { - if page.lqr.IsEmpty() { - return nil - } - return *page.lqr.Value -} - -// Creates a new instance of the ListQueueResourcePage type. -func NewListQueueResourcePage(cur ListQueueResource, getNextPage func(context.Context, ListQueueResource) (ListQueueResource, error)) ListQueueResourcePage { - return ListQueueResourcePage{ - fn: getNextPage, - lqr: cur, - } -} - -// ListQueueServices ... -type ListQueueServices struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of queue services returned. - Value *[]QueueServiceProperties `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListQueueServices. -func (lqs ListQueueServices) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListServiceSasResponse the List service SAS credentials operation response. -type ListServiceSasResponse struct { - autorest.Response `json:"-"` - // ServiceSasToken - READ-ONLY; List service SAS credentials of specific resource. - ServiceSasToken *string `json:"serviceSasToken,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListServiceSasResponse. -func (lssr ListServiceSasResponse) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListTableResource response schema. Contains list of tables returned -type ListTableResource struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of tables returned. - Value *[]Table `json:"value,omitempty"` - // NextLink - READ-ONLY; Request URL that can be used to query next page of tables - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListTableResource. 
-func (ltr ListTableResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ListTableResourceIterator provides access to a complete listing of Table values. -type ListTableResourceIterator struct { - i int - page ListTableResourcePage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ListTableResourceIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListTableResourceIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ListTableResourceIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ListTableResourceIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ListTableResourceIterator) Response() ListTableResource { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ListTableResourceIterator) Value() Table { - if !iter.page.NotDone() { - return Table{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ListTableResourceIterator type. -func NewListTableResourceIterator(page ListTableResourcePage) ListTableResourceIterator { - return ListTableResourceIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (ltr ListTableResource) IsEmpty() bool { - return ltr.Value == nil || len(*ltr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (ltr ListTableResource) hasNextLink() bool { - return ltr.NextLink != nil && len(*ltr.NextLink) != 0 -} - -// listTableResourcePreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (ltr ListTableResource) listTableResourcePreparer(ctx context.Context) (*http.Request, error) { - if !ltr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(ltr.NextLink))) -} - -// ListTableResourcePage contains a page of Table values. -type ListTableResourcePage struct { - fn func(context.Context, ListTableResource) (ListTableResource, error) - ltr ListTableResource -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *ListTableResourcePage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListTableResourcePage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.ltr) - if err != nil { - return err - } - page.ltr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ListTableResourcePage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ListTableResourcePage) NotDone() bool { - return !page.ltr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ListTableResourcePage) Response() ListTableResource { - return page.ltr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ListTableResourcePage) Values() []Table { - if page.ltr.IsEmpty() { - return nil - } - return *page.ltr.Value -} - -// Creates a new instance of the ListTableResourcePage type. -func NewListTableResourcePage(cur ListTableResource, getNextPage func(context.Context, ListTableResource) (ListTableResource, error)) ListTableResourcePage { - return ListTableResourcePage{ - fn: getNextPage, - ltr: cur, - } -} - -// ListTableServices ... -type ListTableServices struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; List of table services returned. - Value *[]TableServiceProperties `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for ListTableServices. -func (lts ListTableServices) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ManagementPolicy the Get Storage Account ManagementPolicies operation response. -type ManagementPolicy struct { - autorest.Response `json:"-"` - // ManagementPolicyProperties - Returns the Storage Account Data Policies Rules. - *ManagementPolicyProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagementPolicy. -func (mp ManagementPolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mp.ManagementPolicyProperties != nil { - objectMap["properties"] = mp.ManagementPolicyProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ManagementPolicy struct. 
-func (mp *ManagementPolicy) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var managementPolicyProperties ManagementPolicyProperties - err = json.Unmarshal(*v, &managementPolicyProperties) - if err != nil { - return err - } - mp.ManagementPolicyProperties = &managementPolicyProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mp.Type = &typeVar - } - } - } - - return nil -} - -// ManagementPolicyAction actions are applied to the filtered blobs when the execution condition is met. -type ManagementPolicyAction struct { - // BaseBlob - The management policy action for base blob - BaseBlob *ManagementPolicyBaseBlob `json:"baseBlob,omitempty"` - // Snapshot - The management policy action for snapshot - Snapshot *ManagementPolicySnapShot `json:"snapshot,omitempty"` - // Version - The management policy action for version - Version *ManagementPolicyVersion `json:"version,omitempty"` -} - -// ManagementPolicyBaseBlob management policy action for base blob. -type ManagementPolicyBaseBlob struct { - // TierToCool - The function to tier blobs to cool storage. Support blobs currently at Hot tier - TierToCool *DateAfterModification `json:"tierToCool,omitempty"` - // TierToArchive - The function to tier blobs to archive storage. Support blobs currently at Hot or Cool tier - TierToArchive *DateAfterModification `json:"tierToArchive,omitempty"` - // Delete - The function to delete the blob - Delete *DateAfterModification `json:"delete,omitempty"` - // EnableAutoTierToHotFromCool - This property enables auto tiering of a blob from cool to hot on a blob access. This property requires tierToCool.daysAfterLastAccessTimeGreaterThan. - EnableAutoTierToHotFromCool *bool `json:"enableAutoTierToHotFromCool,omitempty"` -} - -// ManagementPolicyDefinition an object that defines the Lifecycle rule. Each definition is made up with a -// filters set and an actions set. -type ManagementPolicyDefinition struct { - // Actions - An object that defines the action set. - Actions *ManagementPolicyAction `json:"actions,omitempty"` - // Filters - An object that defines the filter set. - Filters *ManagementPolicyFilter `json:"filters,omitempty"` -} - -// ManagementPolicyFilter filters limit rule actions to a subset of blobs within the storage account. If -// multiple filters are defined, a logical AND is performed on all filters. -type ManagementPolicyFilter struct { - // PrefixMatch - An array of strings for prefixes to be match. - PrefixMatch *[]string `json:"prefixMatch,omitempty"` - // BlobTypes - An array of predefined enum values. Currently blockBlob supports all tiering and delete actions. Only delete actions are supported for appendBlob. - BlobTypes *[]string `json:"blobTypes,omitempty"` - // BlobIndexMatch - An array of blob index tag based filters, there can be at most 10 tag filters - BlobIndexMatch *[]TagFilter `json:"blobIndexMatch,omitempty"` -} - -// ManagementPolicyProperties the Storage Account ManagementPolicy properties. 
-type ManagementPolicyProperties struct { - // LastModifiedTime - READ-ONLY; Returns the date and time the ManagementPolicies was last modified. - LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` - // Policy - The Storage Account ManagementPolicy, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. - Policy *ManagementPolicySchema `json:"policy,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagementPolicyProperties. -func (mpp ManagementPolicyProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mpp.Policy != nil { - objectMap["policy"] = mpp.Policy - } - return json.Marshal(objectMap) -} - -// ManagementPolicyRule an object that wraps the Lifecycle rule. Each rule is uniquely defined by name. -type ManagementPolicyRule struct { - // Enabled - Rule is enabled if set to true. - Enabled *bool `json:"enabled,omitempty"` - // Name - A rule name can contain any combination of alpha numeric characters. Rule name is case-sensitive. It must be unique within a policy. - Name *string `json:"name,omitempty"` - // Type - The valid value is Lifecycle - Type *string `json:"type,omitempty"` - // Definition - An object that defines the Lifecycle rule. - Definition *ManagementPolicyDefinition `json:"definition,omitempty"` -} - -// ManagementPolicySchema the Storage Account ManagementPolicies Rules. See more details in: -// https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. -type ManagementPolicySchema struct { - // Rules - The Storage Account ManagementPolicies Rules. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. - Rules *[]ManagementPolicyRule `json:"rules,omitempty"` -} - -// ManagementPolicySnapShot management policy action for snapshot. -type ManagementPolicySnapShot struct { - // TierToCool - The function to tier blob snapshot to cool storage. Support blob snapshot currently at Hot tier - TierToCool *DateAfterCreation `json:"tierToCool,omitempty"` - // TierToArchive - The function to tier blob snapshot to archive storage. Support blob snapshot currently at Hot or Cool tier - TierToArchive *DateAfterCreation `json:"tierToArchive,omitempty"` - // Delete - The function to delete the blob snapshot - Delete *DateAfterCreation `json:"delete,omitempty"` -} - -// ManagementPolicyVersion management policy action for blob version. -type ManagementPolicyVersion struct { - // TierToCool - The function to tier blob version to cool storage. Support blob version currently at Hot tier - TierToCool *DateAfterCreation `json:"tierToCool,omitempty"` - // TierToArchive - The function to tier blob version to archive storage. Support blob version currently at Hot or Cool tier - TierToArchive *DateAfterCreation `json:"tierToArchive,omitempty"` - // Delete - The function to delete the blob version - Delete *DateAfterCreation `json:"delete,omitempty"` -} - -// MetricSpecification metric specification of operation. -type MetricSpecification struct { - // Name - Name of metric specification. - Name *string `json:"name,omitempty"` - // DisplayName - Display name of metric specification. - DisplayName *string `json:"displayName,omitempty"` - // DisplayDescription - Display description of metric specification. - DisplayDescription *string `json:"displayDescription,omitempty"` - // Unit - Unit could be Bytes or Count. 
- Unit *string `json:"unit,omitempty"` - // Dimensions - Dimensions of blobs, including blob type and access tier. - Dimensions *[]Dimension `json:"dimensions,omitempty"` - // AggregationType - Aggregation type could be Average. - AggregationType *string `json:"aggregationType,omitempty"` - // FillGapWithZero - The property to decide fill gap with zero or not. - FillGapWithZero *bool `json:"fillGapWithZero,omitempty"` - // Category - The category this metric specification belong to, could be Capacity. - Category *string `json:"category,omitempty"` - // ResourceIDDimensionNameOverride - Account Resource Id. - ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"` -} - -// Multichannel multichannel setting. Applies to Premium FileStorage only. -type Multichannel struct { - // Enabled - Indicates whether multichannel is enabled - Enabled *bool `json:"enabled,omitempty"` -} - -// NetworkRuleSet network rule set -type NetworkRuleSet struct { - // Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. Possible values include: 'BypassNone', 'BypassLogging', 'BypassMetrics', 'BypassAzureServices' - Bypass Bypass `json:"bypass,omitempty"` - // ResourceAccessRules - Sets the resource access rules - ResourceAccessRules *[]ResourceAccessRule `json:"resourceAccessRules,omitempty"` - // VirtualNetworkRules - Sets the virtual network rules - VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` - // IPRules - Sets the IP ACL rules - IPRules *[]IPRule `json:"ipRules,omitempty"` - // DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny' - DefaultAction DefaultAction `json:"defaultAction,omitempty"` -} - -// ObjectReplicationPolicies list storage account object replication policies. -type ObjectReplicationPolicies struct { - autorest.Response `json:"-"` - // Value - The replication policy between two storage accounts. - Value *[]ObjectReplicationPolicy `json:"value,omitempty"` -} - -// ObjectReplicationPolicy the replication policy between two storage accounts. Multiple rules can be -// defined in one policy. -type ObjectReplicationPolicy struct { - autorest.Response `json:"-"` - // ObjectReplicationPolicyProperties - Returns the Storage Account Object Replication Policy. - *ObjectReplicationPolicyProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ObjectReplicationPolicy. -func (orp ObjectReplicationPolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if orp.ObjectReplicationPolicyProperties != nil { - objectMap["properties"] = orp.ObjectReplicationPolicyProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ObjectReplicationPolicy struct. 
-func (orp *ObjectReplicationPolicy) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var objectReplicationPolicyProperties ObjectReplicationPolicyProperties - err = json.Unmarshal(*v, &objectReplicationPolicyProperties) - if err != nil { - return err - } - orp.ObjectReplicationPolicyProperties = &objectReplicationPolicyProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - orp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - orp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - orp.Type = &typeVar - } - } - } - - return nil -} - -// ObjectReplicationPolicyFilter filters limit replication to a subset of blobs within the storage account. -// A logical OR is performed on values in the filter. If multiple filters are defined, a logical AND is -// performed on all filters. -type ObjectReplicationPolicyFilter struct { - // PrefixMatch - Optional. Filters the results to replicate only blobs whose names begin with the specified prefix. - PrefixMatch *[]string `json:"prefixMatch,omitempty"` - // MinCreationTime - Blobs created after the time will be replicated to the destination. It must be in datetime format 'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z - MinCreationTime *string `json:"minCreationTime,omitempty"` -} - -// ObjectReplicationPolicyProperties the Storage Account ObjectReplicationPolicy properties. -type ObjectReplicationPolicyProperties struct { - // PolicyID - READ-ONLY; A unique id for object replication policy. - PolicyID *string `json:"policyId,omitempty"` - // EnabledTime - READ-ONLY; Indicates when the policy is enabled on the source account. - EnabledTime *date.Time `json:"enabledTime,omitempty"` - // SourceAccount - Required. Source account name. - SourceAccount *string `json:"sourceAccount,omitempty"` - // DestinationAccount - Required. Destination account name. - DestinationAccount *string `json:"destinationAccount,omitempty"` - // Rules - The storage account object replication rules. - Rules *[]ObjectReplicationPolicyRule `json:"rules,omitempty"` -} - -// MarshalJSON is the custom marshaler for ObjectReplicationPolicyProperties. -func (orpp ObjectReplicationPolicyProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if orpp.SourceAccount != nil { - objectMap["sourceAccount"] = orpp.SourceAccount - } - if orpp.DestinationAccount != nil { - objectMap["destinationAccount"] = orpp.DestinationAccount - } - if orpp.Rules != nil { - objectMap["rules"] = orpp.Rules - } - return json.Marshal(objectMap) -} - -// ObjectReplicationPolicyRule the replication policy rule between two containers. -type ObjectReplicationPolicyRule struct { - // RuleID - Rule Id is auto-generated for each new rule on destination account. It is required for put policy on source account. - RuleID *string `json:"ruleId,omitempty"` - // SourceContainer - Required. Source container name. - SourceContainer *string `json:"sourceContainer,omitempty"` - // DestinationContainer - Required. Destination container name. - DestinationContainer *string `json:"destinationContainer,omitempty"` - // Filters - Optional. An object that defines the filter set. 
- Filters *ObjectReplicationPolicyFilter `json:"filters,omitempty"` -} - -// Operation storage REST API operation definition. -type Operation struct { - // Name - Operation name: {provider}/{resource}/{operation} - Name *string `json:"name,omitempty"` - // Display - Display metadata associated with the operation. - Display *OperationDisplay `json:"display,omitempty"` - // Origin - The origin of operations. - Origin *string `json:"origin,omitempty"` - // OperationProperties - Properties of operation, include metric specifications. - *OperationProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for Operation. -func (o Operation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if o.Name != nil { - objectMap["name"] = o.Name - } - if o.Display != nil { - objectMap["display"] = o.Display - } - if o.Origin != nil { - objectMap["origin"] = o.Origin - } - if o.OperationProperties != nil { - objectMap["properties"] = o.OperationProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Operation struct. -func (o *Operation) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - o.Name = &name - } - case "display": - if v != nil { - var display OperationDisplay - err = json.Unmarshal(*v, &display) - if err != nil { - return err - } - o.Display = &display - } - case "origin": - if v != nil { - var origin string - err = json.Unmarshal(*v, &origin) - if err != nil { - return err - } - o.Origin = &origin - } - case "properties": - if v != nil { - var operationProperties OperationProperties - err = json.Unmarshal(*v, &operationProperties) - if err != nil { - return err - } - o.OperationProperties = &operationProperties - } - } - } - - return nil -} - -// OperationDisplay display metadata associated with the operation. -type OperationDisplay struct { - // Provider - Service provider: Microsoft Storage. - Provider *string `json:"provider,omitempty"` - // Resource - Resource on which the operation is performed etc. - Resource *string `json:"resource,omitempty"` - // Operation - Type of operation: get, read, delete, etc. - Operation *string `json:"operation,omitempty"` - // Description - Description of the operation. - Description *string `json:"description,omitempty"` -} - -// OperationListResult result of the request to list Storage operations. It contains a list of operations -// and a URL link to get the next set of results. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - List of Storage operations supported by the Storage resource provider. - Value *[]Operation `json:"value,omitempty"` -} - -// OperationProperties properties of operation, include metric specifications. -type OperationProperties struct { - // ServiceSpecification - One property of operation, include metric specifications. - ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` -} - -// PrivateEndpoint the Private Endpoint resource. -type PrivateEndpoint struct { - // ID - READ-ONLY; The ARM identifier for Private Endpoint - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateEndpoint. 
-func (peVar PrivateEndpoint) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// PrivateEndpointConnection the Private Endpoint Connection resource. -type PrivateEndpointConnection struct { - autorest.Response `json:"-"` - // PrivateEndpointConnectionProperties - Resource properties. - *PrivateEndpointConnectionProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateEndpointConnection. -func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if pec.PrivateEndpointConnectionProperties != nil { - objectMap["properties"] = pec.PrivateEndpointConnectionProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct. -func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var privateEndpointConnectionProperties PrivateEndpointConnectionProperties - err = json.Unmarshal(*v, &privateEndpointConnectionProperties) - if err != nil { - return err - } - pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - pec.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - pec.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - pec.Type = &typeVar - } - } - } - - return nil -} - -// PrivateEndpointConnectionListResult list of private endpoint connection associated with the specified -// storage account -type PrivateEndpointConnectionListResult struct { - autorest.Response `json:"-"` - // Value - Array of private endpoint connections - Value *[]PrivateEndpointConnection `json:"value,omitempty"` -} - -// PrivateEndpointConnectionProperties properties of the PrivateEndpointConnectProperties. -type PrivateEndpointConnectionProperties struct { - // PrivateEndpoint - The resource of private end point. - PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` - // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider. - PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"` - // ProvisioningState - The provisioning state of the private endpoint connection resource. 
Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed' - ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` -} - -// PrivateLinkResource a private link resource -type PrivateLinkResource struct { - // PrivateLinkResourceProperties - Resource properties. - *PrivateLinkResourceProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateLinkResource. -func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if plr.PrivateLinkResourceProperties != nil { - objectMap["properties"] = plr.PrivateLinkResourceProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for PrivateLinkResource struct. -func (plr *PrivateLinkResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var privateLinkResourceProperties PrivateLinkResourceProperties - err = json.Unmarshal(*v, &privateLinkResourceProperties) - if err != nil { - return err - } - plr.PrivateLinkResourceProperties = &privateLinkResourceProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - plr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - plr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - plr.Type = &typeVar - } - } - } - - return nil -} - -// PrivateLinkResourceListResult a list of private link resources -type PrivateLinkResourceListResult struct { - autorest.Response `json:"-"` - // Value - Array of private link resources - Value *[]PrivateLinkResource `json:"value,omitempty"` -} - -// PrivateLinkResourceProperties properties of a private link resource. -type PrivateLinkResourceProperties struct { - // GroupID - READ-ONLY; The private link resource group id. - GroupID *string `json:"groupId,omitempty"` - // RequiredMembers - READ-ONLY; The private link resource required member names. - RequiredMembers *[]string `json:"requiredMembers,omitempty"` - // RequiredZoneNames - The private link resource Private link DNS zone name. - RequiredZoneNames *[]string `json:"requiredZoneNames,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateLinkResourceProperties. 
-func (plrp PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if plrp.RequiredZoneNames != nil { - objectMap["requiredZoneNames"] = plrp.RequiredZoneNames - } - return json.Marshal(objectMap) -} - -// PrivateLinkServiceConnectionState a collection of information about the state of the connection between -// service consumer and provider. -type PrivateLinkServiceConnectionState struct { - // Status - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: 'PrivateEndpointServiceConnectionStatusPending', 'PrivateEndpointServiceConnectionStatusApproved', 'PrivateEndpointServiceConnectionStatusRejected' - Status PrivateEndpointServiceConnectionStatus `json:"status,omitempty"` - // Description - The reason for approval/rejection of the connection. - Description *string `json:"description,omitempty"` - // ActionRequired - A message indicating if changes on the service provider require any updates on the consumer. - ActionRequired *string `json:"actionRequired,omitempty"` -} - -// ProtocolSettings protocol settings for file service -type ProtocolSettings struct { - // Smb - Setting for SMB protocol - Smb *SmbSetting `json:"smb,omitempty"` -} - -// ProxyResource the resource model definition for a Azure Resource Manager proxy resource. It will not -// have tags and a location -type ProxyResource struct { - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ProxyResource. -func (pr ProxyResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Queue ... -type Queue struct { - autorest.Response `json:"-"` - // QueueProperties - Queue resource properties. - *QueueProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Queue. -func (q Queue) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if q.QueueProperties != nil { - objectMap["properties"] = q.QueueProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Queue struct. 
-func (q *Queue) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var queueProperties QueueProperties - err = json.Unmarshal(*v, &queueProperties) - if err != nil { - return err - } - q.QueueProperties = &queueProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - q.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - q.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - q.Type = &typeVar - } - } - } - - return nil -} - -// QueueProperties ... -type QueueProperties struct { - // Metadata - A name-value pair that represents queue metadata. - Metadata map[string]*string `json:"metadata"` - // ApproximateMessageCount - READ-ONLY; Integer indicating an approximate number of messages in the queue. This number is not lower than the actual number of messages in the queue, but could be higher. - ApproximateMessageCount *int32 `json:"approximateMessageCount,omitempty"` -} - -// MarshalJSON is the custom marshaler for QueueProperties. -func (qp QueueProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if qp.Metadata != nil { - objectMap["metadata"] = qp.Metadata - } - return json.Marshal(objectMap) -} - -// QueueServiceProperties the properties of a storage account’s Queue service. -type QueueServiceProperties struct { - autorest.Response `json:"-"` - // QueueServicePropertiesProperties - The properties of a storage account’s Queue service. - *QueueServicePropertiesProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for QueueServiceProperties. -func (qsp QueueServiceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if qsp.QueueServicePropertiesProperties != nil { - objectMap["properties"] = qsp.QueueServicePropertiesProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for QueueServiceProperties struct. 
-func (qsp *QueueServiceProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var queueServiceProperties QueueServicePropertiesProperties - err = json.Unmarshal(*v, &queueServiceProperties) - if err != nil { - return err - } - qsp.QueueServicePropertiesProperties = &queueServiceProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - qsp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - qsp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - qsp.Type = &typeVar - } - } - } - - return nil -} - -// QueueServicePropertiesProperties the properties of a storage account’s Queue service. -type QueueServicePropertiesProperties struct { - // Cors - Specifies CORS rules for the Queue service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Queue service. - Cors *CorsRules `json:"cors,omitempty"` -} - -// Resource common fields that are returned in the response for all Azure Resource Manager resources -type Resource struct { - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ResourceAccessRule resource Access Rule. -type ResourceAccessRule struct { - // TenantID - Tenant Id - TenantID *string `json:"tenantId,omitempty"` - // ResourceID - Resource Id - ResourceID *string `json:"resourceId,omitempty"` -} - -// RestorePolicyProperties the blob service properties for blob restore policy -type RestorePolicyProperties struct { - // Enabled - Blob restore is enabled if set to true. - Enabled *bool `json:"enabled,omitempty"` - // Days - how long this blob can be restored. It should be great than zero and less than DeleteRetentionPolicy.days. - Days *int32 `json:"days,omitempty"` - // LastEnabledTime - READ-ONLY; Deprecated in favor of minRestoreTime property. - LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` - // MinRestoreTime - READ-ONLY; Returns the minimum date and time that the restore can be started. - MinRestoreTime *date.Time `json:"minRestoreTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for RestorePolicyProperties. -func (rpp RestorePolicyProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if rpp.Enabled != nil { - objectMap["enabled"] = rpp.Enabled - } - if rpp.Days != nil { - objectMap["days"] = rpp.Days - } - return json.Marshal(objectMap) -} - -// Restriction the restriction because of which SKU cannot be used. 
-type Restriction struct { - // Type - READ-ONLY; The type of restrictions. As of now only possible value for this is location. - Type *string `json:"type,omitempty"` - // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. - Values *[]string `json:"values,omitempty"` - // ReasonCode - The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The "NotAvailableForSubscription" is related to capacity at DC. Possible values include: 'ReasonCodeQuotaID', 'ReasonCodeNotAvailableForSubscription' - ReasonCode ReasonCode `json:"reasonCode,omitempty"` -} - -// MarshalJSON is the custom marshaler for Restriction. -func (r Restriction) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if r.ReasonCode != "" { - objectMap["reasonCode"] = r.ReasonCode - } - return json.Marshal(objectMap) -} - -// RoutingPreference routing preference defines the type of network, either microsoft or internet routing -// to be used to deliver the user data, the default option is microsoft routing -type RoutingPreference struct { - // RoutingChoice - Routing Choice defines the kind of network routing opted by the user. Possible values include: 'RoutingChoiceMicrosoftRouting', 'RoutingChoiceInternetRouting' - RoutingChoice RoutingChoice `json:"routingChoice,omitempty"` - // PublishMicrosoftEndpoints - A boolean flag which indicates whether microsoft routing storage endpoints are to be published - PublishMicrosoftEndpoints *bool `json:"publishMicrosoftEndpoints,omitempty"` - // PublishInternetEndpoints - A boolean flag which indicates whether internet routing storage endpoints are to be published - PublishInternetEndpoints *bool `json:"publishInternetEndpoints,omitempty"` -} - -// SasPolicy sasPolicy assigned to the storage account. -type SasPolicy struct { - // SasExpirationPeriod - The SAS expiration period, DD.HH:MM:SS. - SasExpirationPeriod *string `json:"sasExpirationPeriod,omitempty"` - // ExpirationAction - The SAS expiration action. Can only be Log. - ExpirationAction *string `json:"expirationAction,omitempty"` -} - -// ServiceSasParameters the parameters to list service SAS credentials of a specific resource. -type ServiceSasParameters struct { - // CanonicalizedResource - The canonical path to the signed resource. - CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` - // Resource - The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). Possible values include: 'SignedResourceB', 'SignedResourceC', 'SignedResourceF', 'SignedResourceS' - Resource SignedResource `json:"signedResource,omitempty"` - // Permissions - The signed permissions for the service SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'PermissionsR', 'PermissionsD', 'PermissionsW', 'PermissionsL', 'PermissionsA', 'PermissionsC', 'PermissionsU', 'PermissionsP' - Permissions Permissions `json:"signedPermission,omitempty"` - // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. - IPAddressOrRange *string `json:"signedIp,omitempty"` - // Protocols - The protocol permitted for a request made with the account SAS. 
Possible values include: 'HTTPProtocolHttpshttp', 'HTTPProtocolHTTPS' - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - // SharedAccessStartTime - The time at which the SAS becomes valid. - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - // Identifier - A unique value up to 64 characters in length that correlates to an access policy specified for the container, queue, or table. - Identifier *string `json:"signedIdentifier,omitempty"` - // PartitionKeyStart - The start of partition key. - PartitionKeyStart *string `json:"startPk,omitempty"` - // PartitionKeyEnd - The end of partition key. - PartitionKeyEnd *string `json:"endPk,omitempty"` - // RowKeyStart - The start of row key. - RowKeyStart *string `json:"startRk,omitempty"` - // RowKeyEnd - The end of row key. - RowKeyEnd *string `json:"endRk,omitempty"` - // KeyToSign - The key to sign the account SAS token with. - KeyToSign *string `json:"keyToSign,omitempty"` - // CacheControl - The response header override for cache control. - CacheControl *string `json:"rscc,omitempty"` - // ContentDisposition - The response header override for content disposition. - ContentDisposition *string `json:"rscd,omitempty"` - // ContentEncoding - The response header override for content encoding. - ContentEncoding *string `json:"rsce,omitempty"` - // ContentLanguage - The response header override for content language. - ContentLanguage *string `json:"rscl,omitempty"` - // ContentType - The response header override for content type. - ContentType *string `json:"rsct,omitempty"` -} - -// ServiceSpecification one property of operation, include metric specifications. -type ServiceSpecification struct { - // MetricSpecifications - Metric specifications of operation. - MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"` -} - -// Sku the SKU of the storage account. -type Sku struct { - // Name - Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS' - Name SkuName `json:"name,omitempty"` - // Tier - Possible values include: 'SkuTierStandard', 'SkuTierPremium' - Tier SkuTier `json:"tier,omitempty"` -} - -// SKUCapability the capability information in the specified SKU, including file encryption, network ACLs, -// change notification, etc. -type SKUCapability struct { - // Name - READ-ONLY; The name of capability, The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc. - Name *string `json:"name,omitempty"` - // Value - READ-ONLY; A string value to indicate states of given capability. Possibly 'true' or 'false'. - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for SKUCapability. 
-func (sc SKUCapability) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SkuInformation storage SKU and its properties -type SkuInformation struct { - // Name - Possible values include: 'SkuNameStandardLRS', 'SkuNameStandardGRS', 'SkuNameStandardRAGRS', 'SkuNameStandardZRS', 'SkuNamePremiumLRS', 'SkuNamePremiumZRS', 'SkuNameStandardGZRS', 'SkuNameStandardRAGZRS' - Name SkuName `json:"name,omitempty"` - // Tier - Possible values include: 'SkuTierStandard', 'SkuTierPremium' - Tier SkuTier `json:"tier,omitempty"` - // ResourceType - READ-ONLY; The type of the resource, usually it is 'storageAccounts'. - ResourceType *string `json:"resourceType,omitempty"` - // Kind - READ-ONLY; Indicates the type of storage account. Possible values include: 'KindStorage', 'KindStorageV2', 'KindBlobStorage', 'KindFileStorage', 'KindBlockBlobStorage' - Kind Kind `json:"kind,omitempty"` - // Locations - READ-ONLY; The set of locations that the SKU is available. This will be supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). - Locations *[]string `json:"locations,omitempty"` - // Capabilities - READ-ONLY; The capability information in the specified SKU, including file encryption, network ACLs, change notification, etc. - Capabilities *[]SKUCapability `json:"capabilities,omitempty"` - // Restrictions - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. - Restrictions *[]Restriction `json:"restrictions,omitempty"` -} - -// MarshalJSON is the custom marshaler for SkuInformation. -func (si SkuInformation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if si.Name != "" { - objectMap["name"] = si.Name - } - if si.Tier != "" { - objectMap["tier"] = si.Tier - } - if si.Restrictions != nil { - objectMap["restrictions"] = si.Restrictions - } - return json.Marshal(objectMap) -} - -// SkuListResult the response from the List Storage SKUs operation. -type SkuListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; Get the list result of storage SKUs and their properties. - Value *[]SkuInformation `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for SkuListResult. -func (slr SkuListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SmbSetting setting for SMB protocol -type SmbSetting struct { - // Multichannel - Multichannel setting. Applies to Premium FileStorage only. - Multichannel *Multichannel `json:"multichannel,omitempty"` - // Versions - SMB protocol versions supported by server. Valid values are SMB2.1, SMB3.0, SMB3.1.1. Should be passed as a string with delimiter ';'. - Versions *string `json:"versions,omitempty"` - // AuthenticationMethods - SMB authentication methods supported by server. Valid values are NTLMv2, Kerberos. Should be passed as a string with delimiter ';'. - AuthenticationMethods *string `json:"authenticationMethods,omitempty"` - // KerberosTicketEncryption - Kerberos ticket encryption supported by server. Valid values are RC4-HMAC, AES-256. Should be passed as a string with delimiter ';' - KerberosTicketEncryption *string `json:"kerberosTicketEncryption,omitempty"` - // ChannelEncryption - SMB channel encryption supported by server. Valid values are AES-128-CCM, AES-128-GCM, AES-256-GCM. Should be passed as a string with delimiter ';'. 
- ChannelEncryption *string `json:"channelEncryption,omitempty"` -} - -// SystemData metadata pertaining to creation and last modification of the resource. -type SystemData struct { - // CreatedBy - The identity that created the resource. - CreatedBy *string `json:"createdBy,omitempty"` - // CreatedByType - The type of identity that created the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' - CreatedByType CreatedByType `json:"createdByType,omitempty"` - // CreatedAt - The timestamp of resource creation (UTC). - CreatedAt *date.Time `json:"createdAt,omitempty"` - // LastModifiedBy - The identity that last modified the resource. - LastModifiedBy *string `json:"lastModifiedBy,omitempty"` - // LastModifiedByType - The type of identity that last modified the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' - LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"` - // LastModifiedAt - The timestamp of resource last modification (UTC) - LastModifiedAt *date.Time `json:"lastModifiedAt,omitempty"` -} - -// Table properties of the table, including Id, resource name, resource type. -type Table struct { - autorest.Response `json:"-"` - // TableProperties - Table resource properties. - *TableProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Table. -func (t Table) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if t.TableProperties != nil { - objectMap["properties"] = t.TableProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Table struct. -func (t *Table) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var tableProperties TableProperties - err = json.Unmarshal(*v, &tableProperties) - if err != nil { - return err - } - t.TableProperties = &tableProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - t.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - t.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - t.Type = &typeVar - } - } - } - - return nil -} - -// TableProperties ... -type TableProperties struct { - // TableName - READ-ONLY; Table name under the specified account - TableName *string `json:"tableName,omitempty"` -} - -// MarshalJSON is the custom marshaler for TableProperties. -func (tp TableProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// TableServiceProperties the properties of a storage account’s Table service. 
-type TableServiceProperties struct { - autorest.Response `json:"-"` - // TableServicePropertiesProperties - The properties of a storage account’s Table service. - *TableServicePropertiesProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TableServiceProperties. -func (tsp TableServiceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tsp.TableServicePropertiesProperties != nil { - objectMap["properties"] = tsp.TableServicePropertiesProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for TableServiceProperties struct. -func (tsp *TableServiceProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var tableServiceProperties TableServicePropertiesProperties - err = json.Unmarshal(*v, &tableServiceProperties) - if err != nil { - return err - } - tsp.TableServicePropertiesProperties = &tableServiceProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - tsp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - tsp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - tsp.Type = &typeVar - } - } - } - - return nil -} - -// TableServicePropertiesProperties the properties of a storage account’s Table service. -type TableServicePropertiesProperties struct { - // Cors - Specifies CORS rules for the Table service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the Table service. - Cors *CorsRules `json:"cors,omitempty"` -} - -// TagFilter blob index tag based filtering for blob objects -type TagFilter struct { - // Name - This is the filter tag name, it can have 1 - 128 characters - Name *string `json:"name,omitempty"` - // Op - This is the comparison operator which is used for object comparison and filtering. Only == (equality operator) is currently supported - Op *string `json:"op,omitempty"` - // Value - This is the filter tag value field used for tag based filtering, it can have 0 - 256 characters - Value *string `json:"value,omitempty"` -} - -// TagProperty a tag of the LegalHold of a blob container. -type TagProperty struct { - // Tag - READ-ONLY; The tag value. - Tag *string `json:"tag,omitempty"` - // Timestamp - READ-ONLY; Returns the date and time the tag was added. - Timestamp *date.Time `json:"timestamp,omitempty"` - // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who added the tag. 
- ObjectIdentifier *string `json:"objectIdentifier,omitempty"` - // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who added the tag. - TenantID *string `json:"tenantId,omitempty"` - // Upn - READ-ONLY; Returns the User Principal Name of the user who added the tag. - Upn *string `json:"upn,omitempty"` -} - -// MarshalJSON is the custom marshaler for TagProperty. -func (tp TagProperty) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// TrackedResource the resource model definition for an Azure Resource Manager tracked top level resource -// which has 'tags' and a 'location' -type TrackedResource struct { - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrackedResource. -func (tr TrackedResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tr.Tags != nil { - objectMap["tags"] = tr.Tags - } - if tr.Location != nil { - objectMap["location"] = tr.Location - } - return json.Marshal(objectMap) -} - -// UpdateHistoryProperty an update history of the ImmutabilityPolicy of a blob container. -type UpdateHistoryProperty struct { - // Update - READ-ONLY; The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and extend. Possible values include: 'ImmutabilityPolicyUpdateTypePut', 'ImmutabilityPolicyUpdateTypeLock', 'ImmutabilityPolicyUpdateTypeExtend' - Update ImmutabilityPolicyUpdateType `json:"update,omitempty"` - // ImmutabilityPeriodSinceCreationInDays - READ-ONLY; The immutability period for the blobs in the container since the policy creation, in days. - ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"` - // Timestamp - READ-ONLY; Returns the date and time the ImmutabilityPolicy was updated. - Timestamp *date.Time `json:"timestamp,omitempty"` - // ObjectIdentifier - READ-ONLY; Returns the Object ID of the user who updated the ImmutabilityPolicy. - ObjectIdentifier *string `json:"objectIdentifier,omitempty"` - // TenantID - READ-ONLY; Returns the Tenant ID that issued the token for the user who updated the ImmutabilityPolicy. - TenantID *string `json:"tenantId,omitempty"` - // Upn - READ-ONLY; Returns the User Principal Name of the user who updated the ImmutabilityPolicy. - Upn *string `json:"upn,omitempty"` -} - -// MarshalJSON is the custom marshaler for UpdateHistoryProperty. -func (uhp UpdateHistoryProperty) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Usage describes Storage Resource Usage. -type Usage struct { - // Unit - READ-ONLY; Gets the unit of measurement. 
Possible values include: 'UsageUnitCount', 'UsageUnitBytes', 'UsageUnitSeconds', 'UsageUnitPercent', 'UsageUnitCountsPerSecond', 'UsageUnitBytesPerSecond' - Unit UsageUnit `json:"unit,omitempty"` - // CurrentValue - READ-ONLY; Gets the current count of the allocated resources in the subscription. - CurrentValue *int32 `json:"currentValue,omitempty"` - // Limit - READ-ONLY; Gets the maximum count of the resources that can be allocated in the subscription. - Limit *int32 `json:"limit,omitempty"` - // Name - READ-ONLY; Gets the name of the type of usage. - Name *UsageName `json:"name,omitempty"` -} - -// MarshalJSON is the custom marshaler for Usage. -func (u Usage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UsageListResult the response from the List Usages operation. -type UsageListResult struct { - autorest.Response `json:"-"` - // Value - Gets or sets the list of Storage Resource Usages. - Value *[]Usage `json:"value,omitempty"` -} - -// UsageName the usage names that can be used; currently limited to StorageAccount. -type UsageName struct { - // Value - READ-ONLY; Gets a string describing the resource name. - Value *string `json:"value,omitempty"` - // LocalizedValue - READ-ONLY; Gets a localized string describing the resource name. - LocalizedValue *string `json:"localizedValue,omitempty"` -} - -// MarshalJSON is the custom marshaler for UsageName. -func (un UsageName) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// UserAssignedIdentity userAssignedIdentity for the resource. -type UserAssignedIdentity struct { - // PrincipalID - READ-ONLY; The principal ID of the identity. - PrincipalID *string `json:"principalId,omitempty"` - // ClientID - READ-ONLY; The client ID of the identity. - ClientID *string `json:"clientId,omitempty"` -} - -// MarshalJSON is the custom marshaler for UserAssignedIdentity. -func (uai UserAssignedIdentity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// VirtualNetworkRule virtual Network rule. -type VirtualNetworkRule struct { - // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}. - VirtualNetworkResourceID *string `json:"id,omitempty"` - // Action - The action of virtual network rule. Possible values include: 'ActionAllow' - Action Action `json:"action,omitempty"` - // State - Gets the state of virtual network rule. Possible values include: 'StateProvisioning', 'StateDeprovisioning', 'StateSucceeded', 'StateFailed', 'StateNetworkSourceDeleted' - State State `json:"state,omitempty"` -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/objectreplicationpolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/objectreplicationpolicies.go deleted file mode 100644 index a4a9b9d476f8..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/objectreplicationpolicies.go +++ /dev/null @@ -1,417 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
-// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ObjectReplicationPoliciesClient is the the Azure Storage Management API. -type ObjectReplicationPoliciesClient struct { - BaseClient -} - -// NewObjectReplicationPoliciesClient creates an instance of the ObjectReplicationPoliciesClient client. -func NewObjectReplicationPoliciesClient(subscriptionID string) ObjectReplicationPoliciesClient { - return NewObjectReplicationPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewObjectReplicationPoliciesClientWithBaseURI creates an instance of the ObjectReplicationPoliciesClient client -// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign -// clouds, Azure stack). -func NewObjectReplicationPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ObjectReplicationPoliciesClient { - return ObjectReplicationPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate create or update the object replication policy of the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// objectReplicationPolicyID - the ID of object replication policy or 'default' if the policy ID is unknown. -// properties - the object replication policy set to a storage account. A unique policy ID will be created if -// absent. 
-func (client ObjectReplicationPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string, properties ObjectReplicationPolicy) (result ObjectReplicationPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: objectReplicationPolicyID, - Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: properties, - Constraints: []validation.Constraint{{Target: "properties.ObjectReplicationPolicyProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "properties.ObjectReplicationPolicyProperties.SourceAccount", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "properties.ObjectReplicationPolicyProperties.DestinationAccount", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID, properties) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ObjectReplicationPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string, properties ObjectReplicationPolicy) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters), - autorest.WithJSON(properties), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ObjectReplicationPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ObjectReplicationPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result ObjectReplicationPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the object replication policy associated with the specified storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// objectReplicationPolicyID - the ID of object replication policy or 'default' if the policy ID is unknown. 
-func (client ObjectReplicationPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: objectReplicationPolicyID, - Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client ObjectReplicationPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client ObjectReplicationPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ObjectReplicationPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get get the object replication policy of the storage account by policy ID. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// objectReplicationPolicyID - the ID of object replication policy or 'default' if the policy ID is unknown. -func (client ObjectReplicationPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (result ObjectReplicationPolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: objectReplicationPolicyID, - Constraints: []validation.Constraint{{Target: "objectReplicationPolicyID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, objectReplicationPolicyID) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ObjectReplicationPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, objectReplicationPolicyID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "objectReplicationPolicyId": autorest.Encode("path", objectReplicationPolicyID), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ObjectReplicationPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ObjectReplicationPoliciesClient) GetResponder(resp *http.Response) (result ObjectReplicationPolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list the object replication policies associated with the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client ObjectReplicationPoliciesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ObjectReplicationPolicies, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ObjectReplicationPoliciesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.ObjectReplicationPoliciesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.ObjectReplicationPoliciesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client ObjectReplicationPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ObjectReplicationPoliciesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client ObjectReplicationPoliciesClient) ListResponder(resp *http.Response) (result ObjectReplicationPolicies, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/operations.go deleted file mode 100644 index d21dc1ca69e7..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/operations.go +++ /dev/null @@ -1,98 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// OperationsClient is the the Azure Storage Management API. -type OperationsClient struct { - BaseClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists all of the available Storage Rest API operations. -func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Storage/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/privateendpointconnections.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/privateendpointconnections.go deleted file mode 100644 index 7bb24a1cd68e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/privateendpointconnections.go +++ /dev/null @@ -1,411 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// PrivateEndpointConnectionsClient is the the Azure Storage Management API. -type PrivateEndpointConnectionsClient struct { - BaseClient -} - -// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client. -func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient { - return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client -// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign -// clouds, Azure stack). -func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient { - return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Delete deletes the specified private endpoint connection associated with the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. 
-// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure -// resource -func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the specified private endpoint connection associated with the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure -// resource -func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list all the private endpoint connections associated with the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client PrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName string, accountName string) (result PrivateEndpointConnectionListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client PrivateEndpointConnectionsClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateEndpointConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) ListResponder(resp *http.Response) (result PrivateEndpointConnectionListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Put update the state of specified private endpoint connection associated with the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// privateEndpointConnectionName - the name of the private endpoint connection associated with the Azure -// resource -// properties - the private endpoint connection properties. 
-func (client PrivateEndpointConnectionsClient) Put(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (result PrivateEndpointConnection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Put") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: properties, - Constraints: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "properties.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("storage.PrivateEndpointConnectionsClient", "Put", err.Error()) - } - - req, err := client.PutPreparer(ctx, resourceGroupName, accountName, privateEndpointConnectionName, properties) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", nil, "Failure preparing request") - return - } - - resp, err := client.PutSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure sending request") - return - } - - result, err = client.PutResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateEndpointConnectionsClient", "Put", resp, "Failure responding to request") - return - } - - return -} - -// PutPreparer prepares the Put request. 
-func (client PrivateEndpointConnectionsClient) PutPreparer(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string, properties PrivateEndpointConnection) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), - autorest.WithJSON(properties), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PutSender sends the Put request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateEndpointConnectionsClient) PutSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// PutResponder handles the response to the Put request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) PutResponder(resp *http.Response) (result PrivateEndpointConnection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/privatelinkresources.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/privatelinkresources.go deleted file mode 100644 index 2aab58432d4e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/privatelinkresources.go +++ /dev/null @@ -1,124 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// PrivateLinkResourcesClient is the the Azure Storage Management API. -type PrivateLinkResourcesClient struct { - BaseClient -} - -// NewPrivateLinkResourcesClient creates an instance of the PrivateLinkResourcesClient client. 
-func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient { - return NewPrivateLinkResourcesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewPrivateLinkResourcesClientWithBaseURI creates an instance of the PrivateLinkResourcesClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient { - return PrivateLinkResourcesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// ListByStorageAccount gets the private link resources that need to be created for a storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client PrivateLinkResourcesClient) ListByStorageAccount(ctx context.Context, resourceGroupName string, accountName string) (result PrivateLinkResourceListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateLinkResourcesClient.ListByStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.PrivateLinkResourcesClient", "ListByStorageAccount", err.Error()) - } - - req, err := client.ListByStorageAccountPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.ListByStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.ListByStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.PrivateLinkResourcesClient", "ListByStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// ListByStorageAccountPreparer prepares the ListByStorageAccount request. 
-func (client PrivateLinkResourcesClient) ListByStorageAccountPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByStorageAccountSender sends the ListByStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateLinkResourcesClient) ListByStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByStorageAccountResponder handles the response to the ListByStorageAccount request. The method always -// closes the http.Response Body. -func (client PrivateLinkResourcesClient) ListByStorageAccountResponder(resp *http.Response) (result PrivateLinkResourceListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/queue.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/queue.go deleted file mode 100644 index 4a02d3cd77d9..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/queue.go +++ /dev/null @@ -1,571 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// QueueClient is the the Azure Storage Management API. -type QueueClient struct { - BaseClient -} - -// NewQueueClient creates an instance of the QueueClient client. -func NewQueueClient(subscriptionID string) QueueClient { - return NewQueueClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewQueueClientWithBaseURI creates an instance of the QueueClient client using a custom endpoint. Use this when -// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
-func NewQueueClientWithBaseURI(baseURI string, subscriptionID string) QueueClient { - return QueueClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Create creates a new queue with the specified queue name, under the specified account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an -// alphanumeric character and it cannot have two consecutive dash(-) characters. -// queue - queue properties and metadata to be created with -func (client QueueClient) Create(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (result Queue, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Create") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: queueName, - Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, queueName, queue) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Create", resp, "Failure responding to request") - return - } - - return -} - -// CreatePreparer prepares the Create request. 
-func (client QueueClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "queueName": autorest.Encode("path", queueName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), - autorest.WithJSON(queue), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client QueueClient) CreateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client QueueClient) CreateResponder(resp *http.Response) (result Queue, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the queue with the specified queue name, under the specified account if it exists. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an -// alphanumeric character and it cannot have two consecutive dash(-) characters. 
-func (client QueueClient) Delete(ctx context.Context, resourceGroupName string, accountName string, queueName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: queueName, - Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, queueName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client QueueClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "queueName": autorest.Encode("path", queueName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client QueueClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client QueueClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the queue with the specified queue name, under the specified account if it exists. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an -// alphanumeric character and it cannot have two consecutive dash(-) characters. -func (client QueueClient) Get(ctx context.Context, resourceGroupName string, accountName string, queueName string) (result Queue, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: queueName, - Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, queueName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client QueueClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "queueName": autorest.Encode("path", queueName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client QueueClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client QueueClient) GetResponder(resp *http.Response) (result Queue, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of all the queues under the specified storage account -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// maxpagesize - optional, a maximum number of queues that should be included in a list queue response -// filter - optional, When specified, only the queues with a name starting with the given filter will be -// listed. 
-func (client QueueClient) List(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListQueueResourcePage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.List") - defer func() { - sc := -1 - if result.lqr.Response.Response != nil { - sc = result.lqr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxpagesize, filter) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.lqr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", resp, "Failure sending request") - return - } - - result.lqr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "List", resp, "Failure responding to request") - return - } - if result.lqr.hasNextLink() && result.lqr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client QueueClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(maxpagesize) > 0 { - queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client QueueClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client QueueClient) ListResponder(resp *http.Response) (result ListQueueResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client QueueClient) listNextResults(ctx context.Context, lastResults ListQueueResource) (result ListQueueResource, err error) { - req, err := lastResults.listQueueResourcePreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client QueueClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxpagesize string, filter string) (result ListQueueResourceIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, accountName, maxpagesize, filter) - return -} - -// Update creates a new queue with the specified queue name, under the specified account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// queueName - a queue name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of lowercase alphanumeric and dash(-) characters only, it should begin and end with an -// alphanumeric character and it cannot have two consecutive dash(-) characters. 
-// queue - queue properties and metadata to be created with -func (client QueueClient) Update(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (result Queue, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: queueName, - Constraints: []validation.Constraint{{Target: "queueName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "queueName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueClient", "Update", err.Error()) - } - - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, queueName, queue) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client QueueClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, queueName string, queue Queue) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "queueName": autorest.Encode("path", queueName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/default/queues/{queueName}", pathParameters), - autorest.WithJSON(queue), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. 
-func (client QueueClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client QueueClient) UpdateResponder(resp *http.Response) (result Queue, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/queueservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/queueservices.go deleted file mode 100644 index 3223926b972d..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/queueservices.go +++ /dev/null @@ -1,313 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// QueueServicesClient is the the Azure Storage Management API. -type QueueServicesClient struct { - BaseClient -} - -// NewQueueServicesClient creates an instance of the QueueServicesClient client. -func NewQueueServicesClient(subscriptionID string) QueueServicesClient { - return NewQueueServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewQueueServicesClientWithBaseURI creates an instance of the QueueServicesClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewQueueServicesClientWithBaseURI(baseURI string, subscriptionID string) QueueServicesClient { - return QueueServicesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// GetServiceProperties gets the properties of a storage account’s Queue service, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client QueueServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result QueueServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.GetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueServicesClient", "GetServiceProperties", err.Error()) - } - - req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.GetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "GetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// GetServicePropertiesPreparer prepares the GetServiceProperties request. -func (client QueueServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "queueServiceName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the -// http.Response Body if it receives an error. 
-func (client QueueServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always -// closes the http.Response Body. -func (client QueueServicesClient) GetServicePropertiesResponder(resp *http.Response) (result QueueServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list all queue services for the storage account -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client QueueServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListQueueServices, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueServicesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client QueueServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client QueueServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client QueueServicesClient) ListResponder(resp *http.Response) (result ListQueueServices, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetServiceProperties sets the properties of a storage account’s Queue service, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the properties of a storage account’s Queue service, only properties for Storage Analytics and -// CORS (Cross-Origin Resource Sharing) rules can be specified. 
-func (client QueueServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters QueueServiceProperties) (result QueueServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/QueueServicesClient.SetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.QueueServicesClient", "SetServiceProperties", err.Error()) - } - - req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.SetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.SetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.QueueServicesClient", "SetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// SetServicePropertiesPreparer prepares the SetServiceProperties request. -func (client QueueServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters QueueServiceProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "queueServiceName": autorest.Encode("path", "default"), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the -// http.Response Body if it receives an error. 
-func (client QueueServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always -// closes the http.Response Body. -func (client QueueServicesClient) SetServicePropertiesResponder(resp *http.Response) (result QueueServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/skus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/skus.go deleted file mode 100644 index dd77993de20a..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/skus.go +++ /dev/null @@ -1,109 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// SkusClient is the the Azure Storage Management API. -type SkusClient struct { - BaseClient -} - -// NewSkusClient creates an instance of the SkusClient client. -func NewSkusClient(subscriptionID string) SkusClient { - return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewSkusClientWithBaseURI creates an instance of the SkusClient client using a custom endpoint. Use this when -// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient { - return SkusClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists the available SKUs supported by Microsoft.Storage for given subscription. 
-func (client SkusClient) List(ctx context.Context) (result SkuListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SkusClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.SkusClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client SkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/table.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/table.go deleted file mode 100644 index 4dd557991563..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/table.go +++ /dev/null @@ -1,556 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// TableClient is the the Azure Storage Management API. -type TableClient struct { - BaseClient -} - -// NewTableClient creates an instance of the TableClient client. -func NewTableClient(subscriptionID string) TableClient { - return NewTableClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewTableClientWithBaseURI creates an instance of the TableClient client using a custom endpoint. Use this when -// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewTableClientWithBaseURI(baseURI string, subscriptionID string) TableClient { - return TableClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Create creates a new table with the specified table name, under the specified account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. -func (client TableClient) Create(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Create") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: tableName, - Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, - {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableClient", "Create", err.Error()) - } - - req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, tableName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, 
"storage.TableClient", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Create", resp, "Failure responding to request") - return - } - - return -} - -// CreatePreparer prepares the Create request. -func (client TableClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "tableName": autorest.Encode("path", tableName), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client TableClient) CreateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. -func (client TableClient) CreateResponder(resp *http.Response) (result Table, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the table with the specified table name, under the specified account if it exists. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. 
-func (client TableClient) Delete(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: tableName, - Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, - {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, tableName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client TableClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "tableName": autorest.Encode("path", tableName), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client TableClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client TableClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the table with the specified table name, under the specified account if it exists. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. -func (client TableClient) Get(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: tableName, - Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, - {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, accountName, tableName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client TableClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "tableName": autorest.Encode("path", tableName), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client TableClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client TableClient) GetResponder(resp *http.Response) (result Table, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of all the tables under the specified storage account -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client TableClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListTableResourcePage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.List") - defer func() { - sc := -1 - if result.ltr.Response.Response != nil { - sc = result.ltr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.ltr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.TableClient", "List", resp, "Failure sending request") - return - } - - result.ltr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "List", resp, "Failure responding to request") - return - } - if result.ltr.hasNextLink() && result.ltr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client TableClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client TableClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client TableClient) ListResponder(resp *http.Response) (result ListTableResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client TableClient) listNextResults(ctx context.Context, lastResults ListTableResource) (result ListTableResource, err error) { - req, err := lastResults.listTableResourcePreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client TableClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string) (result ListTableResourceIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, accountName) - return -} - -// Update creates a new table with the specified table name, under the specified account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// tableName - a table name must be unique within a storage account and must be between 3 and 63 characters.The -// name must comprise of only alphanumeric characters and it cannot begin with a numeric character. 
-func (client TableClient) Update(ctx context.Context, resourceGroupName string, accountName string, tableName string) (result Table, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: tableName, - Constraints: []validation.Constraint{{Target: "tableName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "tableName", Name: validation.MinLength, Rule: 3, Chain: nil}, - {Target: "tableName", Name: validation.Pattern, Rule: `^[A-Za-z][A-Za-z0-9]{2,62}$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableClient", "Update", err.Error()) - } - - req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, tableName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client TableClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, tableName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "tableName": autorest.Encode("path", tableName), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. 
-func (client TableClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client TableClient) UpdateResponder(resp *http.Response) (result Table, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/tableservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/tableservices.go deleted file mode 100644 index 2a6b66bae40e..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/tableservices.go +++ /dev/null @@ -1,313 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// TableServicesClient is the the Azure Storage Management API. -type TableServicesClient struct { - BaseClient -} - -// NewTableServicesClient creates an instance of the TableServicesClient client. -func NewTableServicesClient(subscriptionID string) TableServicesClient { - return NewTableServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewTableServicesClientWithBaseURI creates an instance of the TableServicesClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewTableServicesClientWithBaseURI(baseURI string, subscriptionID string) TableServicesClient { - return TableServicesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// GetServiceProperties gets the properties of a storage account’s Table service, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
-func (client TableServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result TableServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.GetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableServicesClient", "GetServiceProperties", err.Error()) - } - - req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.GetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "GetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// GetServicePropertiesPreparer prepares the GetServiceProperties request. -func (client TableServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "tableServiceName": autorest.Encode("path", "default"), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the -// http.Response Body if it receives an error. 
-func (client TableServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always -// closes the http.Response Body. -func (client TableServicesClient) GetServicePropertiesResponder(resp *http.Response) (result TableServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list all table services for the storage account. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -func (client TableServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListTableServices, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableServicesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, accountName) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client TableServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client TableServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client TableServicesClient) ListResponder(resp *http.Response) (result ListTableServices, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetServiceProperties sets the properties of a storage account’s Table service, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules. -// Parameters: -// resourceGroupName - the name of the resource group within the user's subscription. The name is case -// insensitive. -// accountName - the name of the storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case letters only. -// parameters - the properties of a storage account’s Table service, only properties for Storage Analytics and -// CORS (Cross-Origin Resource Sharing) rules can be specified. 
-func (client TableServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters TableServiceProperties) (result TableServiceProperties, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TableServicesClient.SetServiceProperties") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, - {TargetValue: accountName, - Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.TableServicesClient", "SetServiceProperties", err.Error()) - } - - req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", nil, "Failure preparing request") - return - } - - resp, err := client.SetServicePropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", resp, "Failure sending request") - return - } - - result, err = client.SetServicePropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.TableServicesClient", "SetServiceProperties", resp, "Failure responding to request") - return - } - - return -} - -// SetServicePropertiesPreparer prepares the SetServiceProperties request. -func (client TableServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters TableServiceProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accountName": autorest.Encode("path", accountName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "tableServiceName": autorest.Encode("path", "default"), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the -// http.Response Body if it receives an error. 
-func (client TableServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always -// closes the http.Response Body. -func (client TableServicesClient) SetServicePropertiesResponder(resp *http.Response) (result TableServiceProperties, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/usages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/usages.go deleted file mode 100644 index 210c45b38cff..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/usages.go +++ /dev/null @@ -1,112 +0,0 @@ -package storage - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// UsagesClient is the the Azure Storage Management API. -type UsagesClient struct { - BaseClient -} - -// NewUsagesClient creates an instance of the UsagesClient client. -func NewUsagesClient(subscriptionID string) UsagesClient { - return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client using a custom endpoint. Use this when -// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient { - return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// ListByLocation gets the current usage count and the limit for the resources of the location under the subscription. -// Parameters: -// location - the location of the Azure Storage resource. 
-func (client UsagesClient) ListByLocation(ctx context.Context, location string) (result UsageListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.ListByLocation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("storage.UsagesClient", "ListByLocation", err.Error()) - } - - req, err := client.ListByLocationPreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", nil, "Failure preparing request") - return - } - - resp, err := client.ListByLocationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure sending request") - return - } - - result, err = client.ListByLocationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure responding to request") - return - } - - return -} - -// ListByLocationPreparer prepares the ListByLocation request. -func (client UsagesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2021-02-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByLocationSender sends the ListByLocation request. The method will close the -// http.Response Body if it receives an error. -func (client UsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByLocationResponder handles the response to the ListByLocation request. The method always -// closes the http.Response Body. -func (client UsagesClient) ListByLocationResponder(resp *http.Response) (result UsageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/version.go deleted file mode 100644 index de079b92f235..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package storage - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. 
All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " storage/2021-02-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index b11eb07884b0..97434ea7f770 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -160,7 +160,7 @@ if (err == nil) { ```Go certificatePath := "./example-app.pfx" -certData, err := ioutil.ReadFile(certificatePath) +certData, err := os.ReadFile(certificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index 9daa4b58b881..f040e2ac6b45 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -27,7 +27,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -116,7 +116,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf } s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) + body := io.NopCloser(strings.NewReader(s)) req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) if err != nil { @@ -131,7 +131,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf } defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) + rb, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) } @@ -175,7 +175,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code } s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) + body := io.NopCloser(strings.NewReader(s)) req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) if err != nil { @@ -190,7 +190,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code } defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) + rb, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index 2a974a39b3cd..fb54a43235ba 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -20,7 +20,6 @@ import ( "encoding/json" "errors" "fmt" - 
"io/ioutil" "os" "path/filepath" @@ -62,7 +61,7 @@ func SaveToken(path string, mode os.FileMode, token Token) error { return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) } - newFile, err := ioutil.TempFile(dir, "token") + newFile, err := os.CreateTemp(dir, "token") if err != nil { return fmt.Errorf("failed to create the temp file to write the token: %v", err) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 2a24ab80cf16..67baecd83ffe 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -25,7 +25,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "net/http" "net/url" @@ -1061,7 +1060,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } else if msiSecret.clientResourceID != "" { data.Set("msi_res_id", msiSecret.clientResourceID) } - req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) + req.Body = io.NopCloser(strings.NewReader(data.Encode())) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") break case msiTypeIMDS: @@ -1096,7 +1095,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) + body := io.NopCloser(strings.NewReader(s)) req.ContentLength = int64(len(s)) req.Header.Set(contentType, mimeTypeFormPost) req.Body = body @@ -1113,7 +1112,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) + rb, err := io.ReadAll(resp.Body) if resp.StatusCode != http.StatusOK { if err != nil { @@ -1235,7 +1234,7 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http for attempt < maxAttempts { if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } resp, err = sender.Do(req) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md new file mode 100644 index 000000000000..05bef8a8002d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md @@ -0,0 +1,152 @@ +# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `auth` to `azidentiy` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). + +## Authentication + +Typical SDK operations must be authenticated and authorized. The `autorest.Authorizer` +interface allows use of any auth style in requests, such as inserting an OAuth2 +Authorization header and bearer token received from Azure AD. 
+ +The SDK itself provides a simple way to get an authorizer which first checks +for OAuth client credentials in environment variables and then falls back to +Azure's [Managed Service Identity]() when available, e.g. when on an Azure +VM. The following snippet from [the previous section](#use) demonstrates +this helper. + +```go +import "github.com/Azure/go-autorest/autorest/azure/auth" + +// create a VirtualNetworks client +vnetClient := network.NewVirtualNetworksClient("") + +// create an authorizer from env vars or Azure Managed Service Idenity +authorizer, err := auth.NewAuthorizerFromEnvironment() +if err != nil { + handle(err) +} + +vnetClient.Authorizer = authorizer + +// call the VirtualNetworks CreateOrUpdate API +vnetClient.CreateOrUpdate(context.Background(), +// ... +``` + +The following environment variables help determine authentication configuration: + +- `AZURE_ENVIRONMENT`: Specifies the Azure Environment to use. If not set, it + defaults to `AzurePublicCloud`. Not applicable to authentication with Managed + Service Identity (MSI). +- `AZURE_AD_RESOURCE`: Specifies the AAD resource ID to use. If not set, it + defaults to `ResourceManagerEndpoint` for operations with Azure Resource + Manager. You can also choose an alternate resource programmatically with + `auth.NewAuthorizerFromEnvironmentWithResource(resource string)`. + +### More Authentication Details + +The previous is the first and most recommended of several authentication +options offered by the SDK because it allows seamless use of both service +principals and [Azure Managed Service Identity][]. Other options are listed +below. + +> Note: If you need to create a new service principal, run `az ad sp create-for-rbac -n ""` in the +> [azure-cli](https://github.com/Azure/azure-cli). See [these +> docs](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest) +> for more info. Copy the new principal's ID, secret, and tenant ID for use in +> your app, or consider the `--sdk-auth` parameter for serialized output. + +[azure managed service identity]: https://docs.microsoft.com/azure/active-directory/msi-overview + +- The `auth.NewAuthorizerFromEnvironment()` described above creates an authorizer + from the first available of the following configuration: + + 1. **Client Credentials**: Azure AD Application ID and Secret. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CLIENT_SECRET`: Specifies the app secret to use. + + 2. **Client Certificate**: Azure AD Application ID and X.509 Certificate. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. + - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. + + 3. **Resource Owner Password**: Azure AD User and Password. This grant type is *not + recommended*, use device login instead if you need interactive login. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_USERNAME`: Specifies the username to use. + - `AZURE_PASSWORD`: Specifies the password to use. + + 4. **Azure Managed Service Identity**: Delegate credential management to the + platform. Requires that code is running in Azure, e.g. on a VM. All + configuration is handled by Azure. 
See [Azure Managed Service + Identity](https://docs.microsoft.com/azure/active-directory/msi-overview) + for more details. + +- The `auth.NewAuthorizerFromFile()` method creates an authorizer using + credentials from an auth file created by the [Azure CLI][]. Follow these + steps to utilize: + + 1. Create a service principal and output an auth file using `az ad sp create-for-rbac --sdk-auth > client_credentials.json`. + 2. Set environment variable `AZURE_AUTH_LOCATION` to the path of the saved + output file. + 3. Use the authorizer returned by `auth.NewAuthorizerFromFile()` in your + client as described above. + +- The `auth.NewAuthorizerFromCLI()` method creates an authorizer which + uses [Azure CLI][] to obtain its credentials. + + The default audience being requested is `https://management.azure.com` (Azure ARM API). + To specify your own audience, export `AZURE_AD_RESOURCE` as an evironment variable. + This is read by `auth.NewAuthorizerFromCLI()` and passed to Azure CLI to acquire the access token. + + For example, to request an access token for Azure Key Vault, export + ``` + AZURE_AD_RESOURCE="https://vault.azure.net" + ``` + +- `auth.NewAuthorizerFromCLIWithResource(AUDIENCE_URL_OR_APPLICATION_ID)` - this method is self contained and does + not require exporting environment variables. For example, to request an access token for Azure Key Vault: + ``` + auth.NewAuthorizerFromCLIWithResource("https://vault.azure.net") + ``` + + To use `NewAuthorizerFromCLI()` or `NewAuthorizerFromCLIWithResource()`, follow these steps: + + 1. Install [Azure CLI v2.0.12](https://docs.microsoft.com/cli/azure/install-azure-cli) or later. Upgrade earlier versions. + 2. Use `az login` to sign in to Azure. + + If you receive an error, use `az account get-access-token` to verify access. + + If Azure CLI is not installed to the default directory, you may receive an error + reporting that `az` cannot be found. + Use the `AzureCLIPath` environment variable to define the Azure CLI installation folder. + + If you are signed in to Azure CLI using multiple accounts or your account has + access to multiple subscriptions, you need to specify the specific subscription + to be used. To do so, use: + + ``` + az account set --subscription + ``` + + To verify the current account settings, use: + + ``` + az account list + ``` + +[azure cli]: https://github.com/Azure/azure-cli + +- Finally, you can use OAuth's [Device Flow][] by calling + `auth.NewDeviceFlowConfig()` and extracting the Authorizer as follows: + + ```go + config := auth.NewDeviceFlowConfig(clientID, tenantID) + a, err := config.Authorizer() + ``` + +[device flow]: https://oauth.net/2/device-flow/ diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index 85acf1c9bc98..25697b3c854c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -16,11 +16,12 @@ package auth import ( "bytes" + "context" "encoding/binary" "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "log" "os" "strings" @@ -232,6 +233,9 @@ func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) } // 4. 
MSI + if !adal.MSIAvailable(context.Background(), nil) { + return nil, errors.New("MSI not available") + } logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using MSI authentication") return settings.GetMSI().Authorizer() } @@ -246,6 +250,17 @@ func NewAuthorizerFromFile(resourceBaseURI string) (autorest.Authorizer, error) if err != nil { return nil, err } + return settings.GetAuthorizer(resourceBaseURI) +} + +// GetAuthorizer create an Authorizer in the following order. +// 1. Client credentials +// 2. Client certificate +// resourceBaseURI - used to determine the resource type +func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) { + if resourceBaseURI == "" { + resourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint + } if a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil { return a, err } @@ -310,7 +325,7 @@ func GetSettingsFromFile() (FileSettings, error) { return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") } - contents, err := ioutil.ReadFile(fileLocation) + contents, err := os.ReadFile(fileLocation) if err != nil { return s, err } @@ -473,7 +488,7 @@ func decode(b []byte) ([]byte, error) { } return []byte(string(utf16.Decode(u16))), nil } - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { @@ -555,7 +570,7 @@ func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { } } -//AuthorizerConfig provides an authorizer from the configuration provided. +// AuthorizerConfig provides an authorizer from the configuration provided. type AuthorizerConfig interface { Authorizer() (autorest.Authorizer, error) } @@ -621,7 +636,7 @@ func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincip if err != nil { return nil, err } - certData, err := ioutil.ReadFile(ccc.CertificatePath) + certData, err := os.ReadFile(ccc.CertificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) } @@ -638,7 +653,7 @@ func (ccc ClientCertificateConfig) MultiTenantServicePrincipalToken() (*adal.Mul if err != nil { return nil, err } - certData, err := ioutil.ReadFile(ccc.CertificatePath) + certData, err := os.ReadFile(ccc.CertificatePath) if err != nil { return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go index 38e4900ad0fb..f7eb26fd366b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build modhack package auth diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go index 861ce2984e64..50d6f0391a5e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -1,3 +1,4 @@ +//go:build modhack // +build 
modhack package cli diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go index 44ff446f6697..486619111c67 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -124,24 +124,91 @@ func LoadTokens(path string) ([]Token, error) { // GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. func GetTokenFromCLI(resource string) (*Token, error) { - // This is the path that a developer can set to tell this class what the install path for Azure CLI is. - const azureCLIPath = "AzureCLIPath" + return GetTokenFromCLIWithParams(GetAccessTokenParams{Resource: resource}) +} - // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. - azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) +// GetAccessTokenParams is the parameter struct of GetTokenFromCLIWithParams +type GetAccessTokenParams struct { + Resource string + ResourceType string + Subscription string + Tenant string +} - // Default path for non-Windows. - const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" +// GetTokenFromCLIWithParams gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLIWithParams(params GetAccessTokenParams) (*Token, error) { + cliCmd := GetAzureCLICommand() - // Validate resource, since it gets sent as a command line argument to Azure CLI - const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." 
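Callers that previously shelled out through `GetTokenFromCLI(resource)` can now reach the richer entry point directly; the old function simply wraps it. A hedged usage sketch of the struct-based API introduced above (the subscription value is a placeholder, and the import path is written the way this vendor tree spells it):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	// Equivalent to the old GetTokenFromCLI(resource), plus the optional
	// subscription/tenant knobs the new params struct exposes.
	token, err := cli.GetTokenFromCLIWithParams(cli.GetAccessTokenParams{
		Resource:     "https://management.azure.com",
		Subscription: "00000000-0000-0000-0000-000000000000", // placeholder
	})
	if err != nil {
		log.Fatalf("az account get-access-token failed: %v", err)
	}
	fmt.Println("got access token of length", len(token.AccessToken))
}
```

Each field is still validated (see `validateParameter`), so anything outside the allowed character set is rejected before it reaches the `az` command line.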
- match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json") + if params.Resource != "" { + if err := validateParameter(params.Resource); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--resource", params.Resource) + } + if params.ResourceType != "" { + if err := validateParameter(params.ResourceType); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--resource-type", params.ResourceType) + } + if params.Subscription != "" { + if err := validateParameter(params.Subscription); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--subscription", params.Subscription) + } + if params.Tenant != "" { + if err := validateParameter(params.Tenant); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--tenant", params.Tenant) + } + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + if stderr.Len() > 0 { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", err.Error()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) if err != nil { return nil, err } + + return &tokenResponse, err +} + +func validateParameter(param string) error { + // Validate parameters, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Parameter %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", param) + if err != nil { + return err + } if !match { - return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + return fmt.Errorf(invalidResourceErrorTemplate, param) } + return nil +} + +// GetAzureCLICommand can be used to run arbitrary Azure CLI command +func GetAzureCLICommand() *exec.Cmd { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. + const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. 
+ const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" // Execute Azure CLI to get token var cliCmd *exec.Cmd @@ -155,21 +222,6 @@ func GetTokenFromCLI(resource string) (*Token, error) { cliCmd.Env = os.Environ() cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) } - cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - - output, err := cliCmd.Output() - if err != nil { - return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) - } - - tokenResponse := Token{} - err = json.Unmarshal(output, &tokenResponse) - if err != nil { - return nil, err - } - - return &tokenResponse, err + return cliCmd } diff --git a/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 1841d146f5f5..f86286051def 100644 --- a/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -48,8 +48,8 @@ duplication. .Net People, Take note on X509: This uses x509.Certificates and private keys. x509 does not store private keys. .Net -has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net -added, it doesn't exist in real life. As such I've put a PEM decoder into here. +has a x509.Certificate2 abstraction that has private keys, but that just a strange invention. +As such I've put a PEM decoder into here. */ // TODO(msal): This should have example code for each method on client using Go's example doc framework. diff --git a/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index 11263822be1d..2221e60c437f 100644 --- a/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -82,6 +82,39 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool { return scopeCounter == len(scopesOne) } +// needsUpgrade returns true if the given key follows the v1.0 schema i.e., +// it contains an uppercase character (v1.1+ keys are all lowercase) +func needsUpgrade(key string) bool { + for _, r := range key { + if 'A' <= r && r <= 'Z' { + return true + } + } + return false +} + +// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting +// the v1.0 item. Callers must hold an exclusive lock on m. 
+func upgrade[T any](m map[string]T, k string) T { + v1_1Key := strings.ToLower(k) + v, ok := m[k] + if !ok { + // another goroutine did the upgrade while this one was waiting for the write lock + return m[v1_1Key] + } + if v2, ok := m[v1_1Key]; ok { + // cache has an equivalent v1.1+ item, which we prefer because we know it was added + // by a newer version of the module and is therefore more likely to remain valid. + // The v1.0 item may have expired because only v1.0 or earlier would update it. + v = v2 + } else { + // add an equivalent item according to the v1.1 schema + m[v1_1Key] = v + } + delete(m, k) + return v +} + // Read reads a storage token from the cache if it exists. func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { tr := TokenResponse{} @@ -255,21 +288,25 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken { m.contractMu.RLock() - defer m.contractMu.RUnlock() // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't // an issue, however if it does become a problem then we know where to look. - for _, at := range m.contract.AccessTokens { + for k, at := range m.contract.AccessTokens { if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { - if (at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { - if checkAlias(at.Environment, envAliases) { - if isMatchingScopes(scopes, at.Scopes) { - return at + if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { + if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + at = upgrade(m.contract.AccessTokens, k) } + return at } } } } + m.contractMu.RUnlock() return AccessToken{} } @@ -310,15 +347,21 @@ func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 m.contractMu.RLock() - defer m.contractMu.RUnlock() for _, matcher := range matchers { - for _, rt := range m.contract.RefreshTokens { + for k, rt := range m.contract.RefreshTokens { if matcher(rt) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + rt = upgrade(m.contract.RefreshTokens, k) + } return rt, nil } } } + m.contractMu.RUnlock() return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") } @@ -340,14 +383,20 @@ func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) erro func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() - for _, idt := range m.contract.IDTokens { + for k, idt := range m.contract.IDTokens { if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { if checkAlias(idt.Environment, envAliases) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + idt = upgrade(m.contract.IDTokens, k) + } return idt, nil } } } + m.contractMu.RUnlock() return IDToken{}, fmt.Errorf("token not found") } @@ -386,7 +435,6 @@ func (m *Manager) Account(homeAccountID string) shared.Account { func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. // We only use a map because the storage contract shared between all language implementations says use a map. @@ -394,11 +442,18 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored // is really low (say 2). Each hash is more expensive than the entire iteration. 
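The storage changes above all follow the same pattern: scan under the read lock, and only when a legacy (v1.0, mixed-case) key is found drop the read lock, take the write lock, and re-key the entry under its lowercase v1.1 name. A standalone sketch of that locking pattern using toy types (nothing here is the MSAL cache contract itself):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

type cache struct {
	mu    sync.RWMutex
	items map[string]string
}

// needsUpgrade mirrors the vendored helper: v1.0 keys contain uppercase runes.
func needsUpgrade(key string) bool {
	return strings.ToLower(key) != key
}

// upgrade re-keys a v1.0 entry under its lowercase v1.1 key; callers must hold mu for writing.
func upgrade(m map[string]string, k string) string {
	lower := strings.ToLower(k)
	v, ok := m[k]
	if !ok {
		return m[lower] // another goroutine already upgraded it
	}
	if v2, ok := m[lower]; ok {
		v = v2 // prefer the entry written under the newer schema
	} else {
		m[lower] = v
	}
	delete(m, k)
	return v
}

func (c *cache) read(match func(string) bool) (string, bool) {
	c.mu.RLock()
	for k, v := range c.items {
		if match(v) {
			c.mu.RUnlock() // release before (possibly) taking the write lock
			if needsUpgrade(k) {
				c.mu.Lock()
				defer c.mu.Unlock()
				v = upgrade(c.items, k)
			}
			return v, true
		}
	}
	c.mu.RUnlock()
	return "", false
}

func main() {
	c := &cache{items: map[string]string{"Legacy-Key": "token"}}
	v, ok := c.read(func(s string) bool { return s == "token" })
	fmt.Println(v, ok, c.items) // the entry now lives under "legacy-key"
}
```

This is also why the `defer m.contractMu.RUnlock()` calls were removed in the hunks above: the read lock cannot be held across the lock upgrade.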
- for _, acc := range m.contract.Accounts { + for k, acc := range m.contract.Accounts { if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + acc = upgrade(m.contract.Accounts, k) + } return acc, nil } } + m.contractMu.RUnlock() return shared.Account{}, fmt.Errorf("account not found") } @@ -412,13 +467,18 @@ func (m *Manager) writeAccount(account shared.Account) error { func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() - - for _, app := range m.contract.AppMetaData { + for k, app := range m.contract.AppMetaData { if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + app = upgrade(m.contract.AppMetaData, k) + } return app, nil } } + m.contractMu.RUnlock() return AppMetaData{}, fmt.Errorf("not found") } diff --git a/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index 2221b3d3391f..392e5e43f7dc 100644 --- a/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/cluster-autoscaler/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -51,7 +51,7 @@ type AuthenticationScheme = authority.AuthenticationScheme type Account = shared.Account -var errNoAccount = errors.New("no account was specified with public.WithAccount(), or the specified account is invalid") +var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid") // clientOptions configures the Client's behavior. 
type clientOptions struct { @@ -217,11 +217,13 @@ func WithClaims(claims string) interface { AcquireSilentOption AcquireInteractiveOption + AcquireByUsernamePasswordOption options.CallOption } { return struct { AcquireSilentOption AcquireInteractiveOption + AcquireByUsernamePasswordOption options.CallOption }{ CallOption: options.NewCallOption( @@ -231,6 +233,8 @@ func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { t.authnScheme = authnScheme case *interactiveAuthOptions: t.authnScheme = authnScheme + case *acquireTokenByUsernamePasswordOptions: + t.authnScheme = authnScheme default: return fmt.Errorf("unexpected options type %T", a) } @@ -349,6 +353,7 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts // acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword type acquireTokenByUsernamePasswordOptions struct { claims, tenantID string + authnScheme AuthenticationScheme } // AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword @@ -374,6 +379,9 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s authParams.Claims = o.claims authParams.Username = username authParams.Password = password + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } token, err := pca.base.Token.UsernamePassword(ctx, authParams) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md index 6ad1c22bbe39..ff9c57e1d844 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md @@ -17,7 +17,7 @@ and corresponding updates for existing programs. ## Parsing and Validation Options -Under the hood, a new `validator` struct takes care of validating the claims. A +Under the hood, a new `Validator` struct takes care of validating the claims. A long awaited feature has been the option to fine-tune the validation of tokens. This is now possible with several `ParserOption` functions that can be appended to most `Parse` functions, such as `ParseWithClaims`. The most important options @@ -68,6 +68,16 @@ type Claims interface { } ``` +Users that previously directly called the `Valid` function on their claims, +e.g., to perform validation independently of parsing/verifying a token, can now +use the `jwt.NewValidator` function to create a `Validator` independently of the +`Parser`. + +```go +var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second)) +v.Validate(myClaims) +``` + ### Supported Claim Types and Removal of `StandardClaims` The two standard claim types supported by this library, `MapClaims` and @@ -169,7 +179,7 @@ be a drop-in replacement, if you're having troubles migrating, please open an issue. You can replace all occurrences of `github.com/dgrijalva/jwt-go` or -`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually +`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v5`, either manually or by using tools such as `sed` or `gofmt`.
And then you'd typically run: diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go index 4ccae2a857de..c929e4a02fcb 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go @@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf case *ecdsa.PublicKey: ecdsaKey = k default: - return ErrInvalidKeyType + return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType) } if len(sig) != 2*m.KeySize { @@ -96,7 +96,7 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte case *ecdsa.PrivateKey: ecdsaKey = k default: - return nil, ErrInvalidKeyType + return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ed25519.go index 3db00e4a233b..c2138119e512 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ed25519.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/ed25519.go @@ -1,11 +1,10 @@ package jwt import ( - "errors" - "crypto" "crypto/ed25519" "crypto/rand" + "errors" ) var ( @@ -39,7 +38,7 @@ func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key inte var ok bool if ed25519Key, ok = key.(ed25519.PublicKey); !ok { - return ErrInvalidKeyType + return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType) } if len(ed25519Key) != ed25519.PublicKeySize { @@ -61,7 +60,7 @@ func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]by var ok bool if ed25519Key, ok = key.(crypto.Signer); !ok { - return nil, ErrInvalidKeyType + return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType) } if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go index 3afb04e648fb..2ad542f00ca3 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go @@ -22,7 +22,7 @@ func (je joinedError) Is(err error) bool { // wrappedErrors is a workaround for wrapping multiple errors in environments // where Go 1.20 is not available. It basically uses the already implemented -// functionatlity of joinedError to handle multiple errors with supplies a +// functionality of joinedError to handle multiple errors with supplies a // custom error message that is identical to the one we produce in Go 1.20 using // multiple %w directives. 
type wrappedErrors struct { diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/hmac.go index 91b688ba9f11..aca600ce1b00 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/hmac.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/hmac.go @@ -59,7 +59,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa // Verify the key is the right type keyBytes, ok := key.([]byte) if !ok { - return ErrInvalidKeyType + return newError("HMAC verify expects []byte", ErrInvalidKeyType) } // Can we use the specified hashing method? @@ -100,5 +100,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, return hasher.Sum(nil), nil } - return nil, ErrInvalidKeyType + return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) } diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/none.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/none.go index c93daa584958..685c2ea30655 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/none.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/none.go @@ -32,7 +32,7 @@ func (m *signingMethodNone) Verify(signingString string, sig []byte, key interfa return NoneSignatureTypeDisallowedError } // If signing method is none, signature must be an empty string - if string(sig) != "" { + if len(sig) != 0 { return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable) } diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser.go index f4386fbaace9..ecf99af78f97 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser.go @@ -18,7 +18,7 @@ type Parser struct { // Skip claims validation during token parsing. skipClaimsValidation bool - validator *validator + validator *Validator decodeStrict bool @@ -28,7 +28,7 @@ type Parser struct { // NewParser creates a new Parser with the specified options func NewParser(options ...ParserOption) *Parser { p := &Parser{ - validator: &validator{}, + validator: &Validator{}, } // Loop through our parsing options and apply them @@ -74,24 +74,40 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } } - // Lookup key - var key interface{} + // Decode signature + token.Signature, err = p.DecodeSegment(parts[2]) + if err != nil { + return token, newError("could not base64 decode signature", ErrTokenMalformed, err) + } + text := strings.Join(parts[0:2], ".") + + // Lookup key(s) if keyFunc == nil { // keyFunc was not provided. 
short circuiting validation return token, newError("no keyfunc was provided", ErrTokenUnverifiable) } - if key, err = keyFunc(token); err != nil { - return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) - } - // Decode signature - token.Signature, err = p.DecodeSegment(parts[2]) + got, err := keyFunc(token) if err != nil { - return token, newError("could not base64 decode signature", ErrTokenMalformed, err) + return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) } - // Perform signature validation - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + switch have := got.(type) { + case VerificationKeySet: + if len(have.Keys) == 0 { + return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable) + } + // Iterate through keys and verify signature, skipping the rest when a match is found. + // Return the last error if no match is found. + for _, key := range have.Keys { + if err = token.Method.Verify(text, token.Signature, key); err == nil { + break + } + } + default: + err = token.Method.Verify(text, token.Signature, have) + } + if err != nil { return token, newError("", ErrTokenSignatureInvalid, err) } @@ -99,7 +115,7 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf if !p.skipClaimsValidation { // Make sure we have at least a default validator if p.validator == nil { - p.validator = newValidator() + p.validator = NewValidator() } if err := p.validator.Validate(claims); err != nil { @@ -117,8 +133,8 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf // // WARNING: Don't use this method unless you know what you're doing. // -// It's only ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from it. +// It's only ever useful in cases where you know the signature is valid (since it has already +// been or will be checked elsewhere in the stack) and you want to extract values from it. func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { parts = strings.Split(tokenString, ".") if len(parts) != 3 { @@ -130,9 +146,6 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke // parse Header var headerBytes []byte if headerBytes, err = p.DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, newError("tokenstring should not contain 'bearer '", ErrTokenMalformed) - } return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err) } if err = json.Unmarshal(headerBytes, &token.Header); err != nil { @@ -140,23 +153,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke } // parse Claims - var claimBytes []byte token.Claims = claims - if claimBytes, err = p.DecodeSegment(parts[1]); err != nil { + claimBytes, err := p.DecodeSegment(parts[1]) + if err != nil { return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err) } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.useJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) + + // If `useJSONNumber` is enabled then we must use *json.Decoder to decode + // the claims. 
However, this comes with a performance penalty so only use + // it if we must and, otherwise, simple use json.Unmarshal. + if !p.useJSONNumber { + // JSON Unmarshal. Special case for map type to avoid weird pointer behavior. + if c, ok := token.Claims.(MapClaims); ok { + err = json.Unmarshal(claimBytes, &c) + } else { + err = json.Unmarshal(claimBytes, &claims) + } } else { - err = dec.Decode(&claims) + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + dec.UseNumber() + // JSON Decode. Special case for map type to avoid weird pointer behavior. + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } } - // Handle decode error if err != nil { return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err) } diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser_option.go index 1b5af970f66a..88a780fbd4a2 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser_option.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/parser_option.go @@ -58,6 +58,14 @@ func WithIssuedAt() ParserOption { } } +// WithExpirationRequired returns the ParserOption to make exp claim required. +// By default exp claim is optional. +func WithExpirationRequired() ParserOption { + return func(p *Parser) { + p.validator.requireExp = true + } +} + // WithAudience configures the validator to require the specified audience in // the `aud` claim. Validation will fail if the audience is not listed in the // token or the `aud` claim is missing. diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa.go index daff094313d8..83cbee6ae2ba 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa.go @@ -51,7 +51,7 @@ func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interfac var ok bool if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType + return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType) } // Create hasher @@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, // Validate type of key if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return nil, ErrInvalidKey + return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go index 9599f0a46c00..28c386ec43aa 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go @@ -88,7 +88,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key inter case *rsa.PublicKey: rsaKey = k default: - return ErrInvalidKey + return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType) } // Create hasher @@ -115,7 +115,7 @@ func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byt case *rsa.PrivateKey: rsaKey = k default: - return nil, ErrInvalidKeyType + return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git 
a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/token.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/token.go index c8ad7c7834de..352873a2d9c2 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/token.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/token.go @@ -1,6 +1,7 @@ package jwt import ( + "crypto" "encoding/base64" "encoding/json" ) @@ -9,8 +10,21 @@ import ( // the key for verification. The function receives the parsed, but unverified // Token. This allows you to use properties in the Header of the token (such as // `kid`) to identify which key to use. +// +// The returned interface{} may be a single key or a VerificationKeySet containing +// multiple keys. type Keyfunc func(*Token) (interface{}, error) +// VerificationKey represents a public or secret key for verifying a token's signature. +type VerificationKey interface { + crypto.PublicKey | []uint8 +} + +// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token. +type VerificationKeySet struct { + Keys []VerificationKey +} + // Token represents a JWT Token. Different fields will be used depending on // whether you're creating or parsing/verifying a token. type Token struct { diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/types.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/types.go index b82b38867d0d..b2655a9e6d26 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/types.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/types.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "math" - "reflect" "strconv" "time" ) @@ -121,14 +120,14 @@ func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { for _, vv := range v { vs, ok := vv.(string) if !ok { - return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + return ErrInvalidType } aud = append(aud, vs) } case nil: return nil default: - return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + return ErrInvalidType } *s = aud diff --git a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/validator.go b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/validator.go index 3850438939d0..008ecd8712ec 100644 --- a/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/validator.go +++ b/cluster-autoscaler/vendor/github.com/golang-jwt/jwt/v5/validator.go @@ -28,13 +28,12 @@ type ClaimsValidator interface { Validate() error } -// validator is the core of the new Validation API. It is automatically used by +// Validator is the core of the new Validation API. It is automatically used by // a [Parser] during parsing and can be modified with various parser options. // -// Note: This struct is intentionally not exported (yet) as we want to -// internally finalize its API. In the future, we might make it publicly -// available. -type validator struct { +// The [NewValidator] function should be used to create an instance of this +// struct. +type Validator struct { // leeway is an optional leeway that can be provided to account for clock skew. leeway time.Duration @@ -42,6 +41,9 @@ type validator struct { // validation. If unspecified, this defaults to time.Now. timeFunc func() time.Time + // requireExp specifies whether the exp claim is required + requireExp bool + // verifyIat specifies whether the iat (Issued At) claim will be verified. 
// According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this // only specifies the age of the token, but no validation check is @@ -62,16 +64,28 @@ type validator struct { expectedSub string } -// newValidator can be used to create a stand-alone validator with the supplied +// NewValidator can be used to create a stand-alone validator with the supplied // options. This validator can then be used to validate already parsed claims. -func newValidator(opts ...ParserOption) *validator { +// +// Note: Under normal circumstances, explicitly creating a validator is not +// needed and can potentially be dangerous; instead functions of the [Parser] +// class should be used. +// +// The [Validator] is only checking the *validity* of the claims, such as its +// expiration time, but it does NOT perform *signature verification* of the +// token. +func NewValidator(opts ...ParserOption) *Validator { p := NewParser(opts...) return p.validator } // Validate validates the given claims. It will also perform any custom // validation if claims implements the [ClaimsValidator] interface. -func (v *validator) Validate(claims Claims) error { +// +// Note: It will NOT perform any *signature verification* on the token that +// contains the claims and expects that the [Claim] was already successfully +// verified. +func (v *Validator) Validate(claims Claims) error { var ( now time.Time errs []error = make([]error, 0, 6) @@ -86,8 +100,9 @@ func (v *validator) Validate(claims Claims) error { } // We always need to check the expiration time, but usage of the claim - // itself is OPTIONAL. - if err = v.verifyExpiresAt(claims, now, false); err != nil { + // itself is OPTIONAL by default. requireExp overrides this behavior + // and makes the exp claim mandatory. + if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil { errs = append(errs, err) } @@ -149,7 +164,7 @@ func (v *validator) Validate(claims Claims) error { // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { exp, err := claims.GetExpirationTime() if err != nil { return err @@ -170,7 +185,7 @@ func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { iat, err := claims.GetIssuedAt() if err != nil { return err @@ -191,7 +206,7 @@ func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. 
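Because the validator type is now exported and `WithExpirationRequired` was added to the parser options above, expiry handling can be tightened both at parse time and for already-parsed claims. A small sketch, assuming an HS256-signed token and a placeholder shared secret (the import path follows this vendor tree's spelling):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("placeholder-secret")

	// Issue a short-lived token so the example is self-contained.
	claims := jwt.RegisteredClaims{
		Subject:   "demo",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute)),
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse-time validation: reject tokens that omit the exp claim entirely.
	parser := jwt.NewParser(jwt.WithExpirationRequired(), jwt.WithLeeway(5*time.Second))
	token, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) { return secret, nil })
	fmt.Println("parsed:", token.Valid, err)

	// Standalone validation of already-parsed claims via the exported Validator.
	v := jwt.NewValidator(jwt.WithLeeway(5 * time.Second))
	fmt.Println("standalone validate:", v.Validate(claims))
}
```

Note that, as the exported doc comments above stress, the Validator only checks claim validity; signature verification still happens in the Parser.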
-func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { nbf, err := claims.GetNotBefore() if err != nil { return err @@ -211,7 +226,7 @@ func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyAudience(claims Claims, cmp string, required bool) error { +func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error { aud, err := claims.GetAudience() if err != nil { return err @@ -247,7 +262,7 @@ func (v *validator) verifyAudience(claims Claims, cmp string, required bool) err // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error { +func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error { iss, err := claims.GetIssuer() if err != nil { return err @@ -267,7 +282,7 @@ func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifySubject(claims Claims, cmp string, required bool) error { +func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error { sub, err := claims.GetSubject() if err != nil { return err diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829dc64f..7ec5ac7ea909 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/CHANGELOG.md +++ b/cluster-autoscaler/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/hash.go b/cluster-autoscaler/vendor/github.com/google/uuid/hash.go index b404f4bec274..dc60082d3b3b 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/hash.go +++ b/cluster-autoscaler/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. 
+ Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/cluster-autoscaler/vendor/github.com/google/uuid/version7.go b/cluster-autoscaler/vendor/github.com/google/uuid/version7.go index ba9dd5eb689b..3167b643d459 100644 --- a/cluster-autoscaler/vendor/github.com/google/uuid/version7.go +++ b/cluster-autoscaler/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guaranteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index ec91408f9903..44222220a383 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,81 @@ +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + +## 2.16.0 + +### Features +- add SpecContext to reporting nodes + +### Fixes +- merge coverages instead of combining them (#1329) (#1340) [23f0cc5] +- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7] + +### Maintenance +- docs/index.md: Typo [2cebe8d] +- fix docs [06de431] +- chore: test with Go 1.22 (#1352) [898cba9] +- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120] +- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed] +- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69] +- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d] +- Fix docs for handling failures in goroutines (#1339) [4471b2e] + +## 2.15.0 + +### Features + +- JUnit reports now interpret Label(owner:X) and set owner to X. [8f3bd70] +- include cancellation reason when cancelling spec context [96e915c] + +### Fixes + +- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09] +- fix outline when using nodot in ginkgo v2 [dca77c8] +- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f] +- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14] + +### Maintenance + +- Bump to go 1.20 [4fcd0b3] + +## 2.14.0 + +### Features +You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library. + +Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescribeTableSubtree` generates a new container for each entry and invokes your function to fill out the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more.
+ +- Introduce DescribeTableSubtree [65ec56d] +- add GinkgoTB() to docs [4a2c832] +- Add GinkgoTB() function (#1333) [92b6744] + +### Fixes +- Fix typo in internal/suite.go (#1332) [beb9507] +- Fix typo in docs/index.md (#1319) [4ac3a13] +- allow wasm to compile with ginkgo present (#1311) [b2e5bc5] + +### Maintenance +- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec] +- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40] +- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724] +- Bump golang.org/x/crypto (#1318) [3ee80ee] +- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5] +- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3] + ## 2.13.2 ### Fixes diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 2d7a70eccbb0..a3e8237e938b 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels { return suiteLabels } +func getwd() (string, error) { + if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") { + // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache + // is shared between two different directories with the same test code. + return os.Getwd() + } + return "", nil +} + /* PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. See http://onsi.github.io/ginkgo/#previewing-specs for more information. @@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -783,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 000000000000..3c5079ff4c74 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, 
fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index bd3c6d0287a5..5f35864ddba7 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 +1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -//loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. 
- if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } @@ -184,7 +172,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 958daccbfa8c..5d8d00bb17f4 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,10 +1,11 @@ package outline import ( - "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" + + "github.com/onsi/ginkgo/v2/types" ) const ( diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index 67ec5ab75798..f0a6b5d26cd0 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } + name = "ginkgo" } if name == "." 
{ name = "" diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 28447ffdd261..02c6739e5be7 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,7 +1,10 @@ package ginkgo import ( + "testing" + "github.com/onsi/ginkgo/v2/internal/testingtproxy" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -12,10 +15,15 @@ GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's GinkgoT() takes an optional offset argument that can be used to get the correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following: + +- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf. +- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model. + You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { - offset := 3 + offset := 1 if len(optionalOffset) > 0 { offset = optionalOffset[0] } @@ -41,21 +49,21 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the type GinkgoTInterface interface { Cleanup(func()) Setenv(kev, value string) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) + Error(args ...any) + Errorf(format string, args ...any) Fail() FailNow() Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) Helper() - Log(args ...interface{}) - Logf(format string, args ...interface{}) + Log(args ...any) + Logf(format string, args ...any) Name() string Parallel() - Skip(args ...interface{}) + Skip(args ...any) SkipNow() - Skipf(format string, args ...interface{}) + Skipf(format string, args ...any) Skipped() bool TempDir() string } @@ -71,9 +79,9 @@ type FullGinkgoTInterface interface { AddReportEntryVisibilityNever(name string, args ...any) //Prints to the GinkgoWriter - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo F(format string, args ...any) string @@ -92,3 +100,81 @@ type FullGinkgoTInterface interface { AttachProgressReporter(func() string) func() } + +/* +GinkgoTB() implements a wrapper that exactly matches the testing.TB interface. + +In go 1.18 a new private() function was added to the testing.TB interface. Any function which accepts testing.TB as input needs to be passed in something that directly implements testing.TB. + +This wrapper satisfies the testing.TB interface and intended to be used as a drop-in replacement with third party libraries that accept testing.TB. 
+ +Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the +correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +*/ +func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper { + offset := 2 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)} +} + +type GinkgoTBWrapper struct { + testing.TB + GinkgoT FullGinkgoTInterface +} + +func (g *GinkgoTBWrapper) Cleanup(f func()) { + g.GinkgoT.Cleanup(f) +} +func (g *GinkgoTBWrapper) Error(args ...any) { + g.GinkgoT.Error(args...) +} +func (g *GinkgoTBWrapper) Errorf(format string, args ...any) { + g.GinkgoT.Errorf(format, args...) +} +func (g *GinkgoTBWrapper) Fail() { + g.GinkgoT.Fail() +} +func (g *GinkgoTBWrapper) FailNow() { + g.GinkgoT.FailNow() +} +func (g *GinkgoTBWrapper) Failed() bool { + return g.GinkgoT.Failed() +} +func (g *GinkgoTBWrapper) Fatal(args ...any) { + g.GinkgoT.Fatal(args...) +} +func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) { + g.GinkgoT.Fatalf(format, args...) +} +func (g *GinkgoTBWrapper) Helper() { + types.MarkAsHelper(1) +} +func (g *GinkgoTBWrapper) Log(args ...any) { + g.GinkgoT.Log(args...) +} +func (g *GinkgoTBWrapper) Logf(format string, args ...any) { + g.GinkgoT.Logf(format, args...) +} +func (g *GinkgoTBWrapper) Name() string { + return g.GinkgoT.Name() +} +func (g *GinkgoTBWrapper) Setenv(key, value string) { + g.GinkgoT.Setenv(key, value) +} +func (g *GinkgoTBWrapper) Skip(args ...any) { + g.GinkgoT.Skip(args...) +} +func (g *GinkgoTBWrapper) SkipNow() { + g.GinkgoT.SkipNow() +} +func (g *GinkgoTBWrapper) Skipf(format string, args ...any) { + g.GinkgoT.Skipf(format, args...) +} +func (g *GinkgoTBWrapper) Skipped() bool { + return g.GinkgoT.Skipped() +} +func (g *GinkgoTBWrapper) TempDir() string { + return g.GinkgoT.TempDir() +} diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 16f0dc227825..6a15f19ae04a 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "sync" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { - //There's a reace in the internal integration tests if we don't make - //accessing _global_node_id_counter safe across goroutines. + // There's a reace in the internal integration tests if we don't make + // accessing _global_node_id_counter safe across goroutines. 
_global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 @@ -44,8 +43,8 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportSuiteBody func(types.Report) + ReportEachBody func(SpecContext, types.SpecReport) + ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool @@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} - //First get the CodeLocation up-to-date + // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: @@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} - //now process the rest of the args + // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): - break //ignore deprecated timeouts + break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { - node.ReportEachBody = arg.(func(types.SpecReport)) + if fn, ok := arg.(func(types.SpecReport)); ok { + node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } + } else { + node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { - node.ReportSuiteBody = arg.(func(types.Report)) + if fn, ok := arg.(func(types.Report)); ok { + node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } + } else { + node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - //validations + // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go new file mode 100644 index 000000000000..4c374935b8a5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go @@ -0,0 +1,7 @@ +//go:build wasm + +package internal + +func NewOutputInterceptor() OutputInterceptor { + return &NoopOutputInterceptor{} +} diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go 
b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go new file mode 100644 index 000000000000..8c53fe0adad4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go @@ -0,0 +1,10 @@ +//go:build wasm + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2515b84a140c..2d2ea2fc3577 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -17,7 +17,7 @@ type specContext struct { context.Context *ProgressReporterManager - cancel context.CancelFunc + cancel context.CancelCauseFunc suite *Suite } @@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. */ func NewSpecContext(suite *Suite) *specContext { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancelCause(context.Background()) sc := &specContext{ cancel: cancel, suite: suite, diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index fe6e8288ad90..a994ee3d67e5 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -79,7 +79,7 @@ func NewSuite() *Suite { func (suite *Suite) Clone() (*Suite, error) { if suite.phase != PhaseBuildTopLevel { - return nil, fmt.Errorf("cnanot clone suite after tree has been built") + return nil, fmt.Errorf("cannot clone suite after tree has been built") } return &Suite{ tree: &TreeNode{}, @@ -594,8 +594,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func(SpecContext) { - nodes[i].ReportEachBody(report) + nodes[i].Body = func(ctx SpecContext) { + nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) @@ -762,7 +762,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() @@ -840,7 +840,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { - //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + // we're out of time already. 
let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" @@ -858,7 +858,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } sc := NewSpecContext(suite) - defer sc.cancel() + defer sc.cancel(fmt.Errorf("spec has finished")) suite.selectiveLock.Lock() suite.currentSpecContext = sc @@ -918,9 +918,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ if outcomeFromRun != types.SpecStatePassed { additionalFailure := types.AdditionalFailure{ State: outcomeFromRun, - Failure: failure, //we make a copy - this will include all the configuration set up above... + Failure: failure, // we make a copy - this will include all the configuration set up above... } - //...and then we update the failure with the details from failureFromRun + // ...and then we update the failure with the details from failureFromRun additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation additionalFailure.Failure.ProgressReport = types.ProgressReport{} if outcome == types.SpecStateTimedout { @@ -958,8 +958,8 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where // the spec is actually stuck - sc.cancel() - //and now we wait for the grace period + sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) + // and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() @@ -985,7 +985,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } progressReport = progressReport.WithoutOtherGoroutines() - sc.cancel() + sc.cancel(fmt.Errorf(interruptStatus.Message())) if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be758796..4026859ec397 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel @@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", 
report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 816042208c08..43244a9bd519 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,6 +15,7 @@ import ( "fmt" "os" "path" + "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -104,6 +105,8 @@ type JUnitProperty struct { Value string `xml:"value,attr"` } +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -113,6 +116,8 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` + // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. 
+ Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -195,6 +200,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -202,6 +213,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), + Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index f33786a2d653..aa1a35176aa3 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) { /* ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that -receives a SpecReport. They are called before the spec starts. +receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts. + +Example: + + ReportBeforeEach(func(report SpecReport) { // process report }) + ReportBeforeEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { +func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that -receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. +ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior. +They are called after the spec has completed and receive the final report for the spec. + +Example: + + ReportAfterEach(func(report SpecReport) { // process report }) + ReportAfterEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. 
You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { +func ReportAfterEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function +that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportBeforeSuite(func(r Report) { // process report }) + ReportBeforeSuite(func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) @@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeSuite(body func(Report), args ...interface{}) bool { +func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } /* -ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion, +and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report }) + ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. -ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) +ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes @@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. 
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { +func ReportAfterSuite(text string, body any, args ...interface{}) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index ac9b7abb5ee2..a3aef821bff4 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -46,7 +46,7 @@ And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-s */ func DescribeTable(description string, args ...interface{}) bool { GinkgoHelper() - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -56,7 +56,7 @@ You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. func FDescribeTable(description string, args ...interface{}) bool { GinkgoHelper() args = append(args, internal.Focus) - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -66,7 +66,7 @@ You can mark a table as pending with `PDescribeTable`. This is equivalent to `P func PDescribeTable(description string, args ...interface{}) bool { GinkgoHelper() args = append(args, internal.Pending) - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -75,6 +75,71 @@ You can mark a table as pending with `XDescribeTable`. This is equivalent to `X */ var XDescribeTable = PDescribeTable +/* +DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry. + +For example: + + DescribeTableSubtree("a subtree table", + func(url string, code int, message string) { + var resp *http.Response + BeforeEach(func() { + var err error + resp, err = http.Get(url) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(resp.Body.Close) + }) + + It("should return the expected status code", func() { + Expect(resp.StatusCode).To(Equal(code)) + }) + + It("should return the expected message", func() { + body, err := ioutil.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(Equal(message)) + }) + }, + Entry("default response", "example.com/response", http.StatusOK, "hello world"), + Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"), + ) + +Note that you **must** place define an It inside the body function. + +You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs +And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns +*/ +func DescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + generateTable(description, true, args...) + return true +} + +/* +You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`. 
+*/ +func FDescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + args = append(args, internal.Focus) + generateTable(description, true, args...) + return true +} + +/* +You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`. +*/ +func PDescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + args = append(args, internal.Pending) + generateTable(description, true, args...) + return true +} + +/* +You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`. +*/ +var XDescribeTableSubtree = PDescribeTableSubtree + /* TableEntry represents an entry in a table test. You generally use the `Entry` constructor. */ @@ -131,14 +196,14 @@ var XEntry = PEntry var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func generateTable(description string, args ...interface{}) { +func generateTable(description string, isSubtree bool, args ...interface{}) { GinkgoHelper() cl := types.NewCodeLocation(0) containerNodeArgs := []interface{}{cl} entries := []TableEntry{} - var itBody interface{} - var itBodyType reflect.Type + var internalBody interface{} + var internalBodyType reflect.Type var tableLevelEntryDescription interface{} tableLevelEntryDescription = func(args ...interface{}) string { @@ -166,11 +231,11 @@ func generateTable(description string, args ...interface{}) { case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): tableLevelEntryDescription = arg case t.Kind() == reflect.Func: - if itBody != nil { + if internalBody != nil { exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) } - itBody = arg - itBodyType = reflect.TypeOf(itBody) + internalBody = arg + internalBodyType = reflect.TypeOf(internalBody) default: containerNodeArgs = append(containerNodeArgs, arg) } @@ -200,39 +265,47 @@ func generateTable(description string, args ...interface{}) { err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - itNodeArgs := []interface{}{entry.codeLocation} - itNodeArgs = append(itNodeArgs, entry.decorations...) + internalNodeArgs := []interface{}{entry.codeLocation} + internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if itBodyType.NumIn() > 0. { - if itBodyType.In(0).Implements(specContextType) { + if internalBodyType.NumIn() > 0. 
{ + if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { hasContext = true } } if err == nil { - err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) } if hasContext { - itNodeArgs = append(itNodeArgs, func(c SpecContext) { + internalNodeArgs = append(internalNodeArgs, func(c SpecContext) { if err != nil { panic(err) } - invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...)) }) + if isSubtree { + exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl)) + } } else { - itNodeArgs = append(itNodeArgs, func() { + internalNodeArgs = append(internalNodeArgs, func() { if err != nil { panic(err) } - invokeFunction(itBody, entry.parameters) + invokeFunction(internalBody, entry.parameters) }) } - pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) + internalNodeType := types.NodeTypeIt + if isSubtree { + internalNodeType = types.NodeTypeContainer + } + + pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...)) } }) diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/config.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a75fb..cef273ee1ff9 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -89,6 +89,7 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool JSONReport string JUnitReport string @@ -264,7 +265,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 4fbdc3e9b1d3..6bb72d00ccda 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac } } +func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { + return GinkgoError{ + Heading: "Contexts cannot be used in subtree tables", + Message: "You've defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + /* Parallel Synchronization errors */ func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873d0e..de69f3022def 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/version.go b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/version.go index a4a1524b4fd0..851d42b456b8 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/cluster-autoscaler/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.13.2" +const VERSION = "2.17.1" diff --git a/cluster-autoscaler/vendor/github.com/onsi/gomega/CHANGELOG.md b/cluster-autoscaler/vendor/github.com/onsi/gomega/CHANGELOG.md index fe72a7b183fc..01ec5245cdc6 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/cluster-autoscaler/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,41 @@ +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. 
+ +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + ## 1.30.0 ### Features diff --git a/cluster-autoscaler/vendor/github.com/onsi/gomega/gomega_dsl.go b/cluster-autoscaler/vendor/github.com/onsi/gomega/gomega_dsl.go index c271a366ae0b..ffb81b1feb39 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/cluster-autoscaler/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.30.0" +const GOMEGA_VERSION = "1.32.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/cluster-autoscaler/vendor/github.com/onsi/gomega/internal/async_assertion.go b/cluster-autoscaler/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bce37f..cde9e2ec8bd6 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/cluster-autoscaler/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -553,7 +553,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers.go b/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers.go index 43f994374da3..8860d677fc8f 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers.go +++ b/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers.go @@ -394,7 +394,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. 
Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb919492..4e3897858c77 100644 --- a/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/cluster-autoscaler/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md b/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md index 246660b21a9f..78dc1f8b03ed 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md @@ -4,20 +4,20 @@ [![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) [![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) +[![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx) Objx - Go package for dealing with maps, slices, JSON and other data. Get started: - Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx +- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx ## Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. ### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: m, err := objx.FromJSON(json) @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. 
+We currently support the three recent major Go versions. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml index 7746f516da20..8a79e8d674c0 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,7 +1,4 @@ -version: '2' - -env: - GOFLAGS: -mod=vendor +version: '3' tasks: default: diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go index 4c6045588637..72f1d1c1ce3d 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go @@ -14,17 +14,17 @@ const ( // For example, `location.address.city` PathSeparator string = "." - // arrayAccesRegexString is the regex used to extract the array number + // arrayAccessRegexString is the regex used to extract the array number // from the access path - arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + arrayAccessRegexString = `^(.+)\[([0-9]+)\]$` // mapAccessRegexString is the regex used to extract the map key // from the access path mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// arrayAccessRegex is the compiled arrayAccessRegexString +var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString) // mapAccessRegex is the compiled mapAccessRegexString var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) @@ -37,11 +37,11 @@ var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) // // Get can only operate directly on map[string]interface{} and []interface. // -// Example +// # Example // // To access the title of the third chapter of the second book, do: // -// o.Get("books[1].chapters[2].title") +// o.Get("books[1].chapters[2].title") func (m Map) Get(selector string) *Value { rawObj := access(m, selector, nil, false) return &Value{data: rawObj} @@ -52,26 +52,26 @@ func (m Map) Get(selector string) *Value { // // Set can only operate directly on map[string]interface{} and []interface // -// Example +// # Example // // To set the title of the third chapter of the second book, do: // -// o.Set("books[1].chapters[2].title","Time to Go") +// o.Set("books[1].chapters[2].title","Time to Go") func (m Map) Set(selector string, value interface{}) Map { access(m, selector, value, true) return m } -// getIndex returns the index, which is hold in s by two braches. -// It also returns s withour the index part, e.g. name[1] will return (1, name). +// getIndex returns the index, which is hold in s by two branches. +// It also returns s without the index part, e.g. name[1] will return (1, name). 
// If no index is found, -1 is returned func getIndex(s string) (int, string) { - arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + arrayMatches := arrayAccessRegex.FindStringSubmatch(s) if len(arrayMatches) > 0 { // Get the key into the map selector := arrayMatches[1] // Get the index into the array at the key - // We know this cannt fail because arrayMatches[2] is an int for sure + // We know this can't fail because arrayMatches[2] is an int for sure index, _ := strconv.Atoi(arrayMatches[2]) return index, selector } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go index 080aa46e4723..01c63d7d3bbf 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go @@ -15,7 +15,7 @@ import ( const SignatureSeparator = "_" // URLValuesSliceKeySuffix is the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c @@ -30,7 +30,7 @@ const ( ) // SetURLValuesSliceKeySuffix sets the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go index 6d6af1a83abf..b170af74b39a 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go @@ -1,19 +1,19 @@ /* -Objx - Go package for dealing with maps, slices, JSON and other data. +Package objx provides utilities for dealing with maps, slices, JSON and other data. -Overview +# Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. -Pattern +# Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - m, err := objx.FromJSON(json) + m, err := objx.FromJSON(json) NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. @@ -21,46 +21,46 @@ the rest will be optimistic and try to figure things out without panicking. Use `Get` to access the value you're interested in. You can use dot and array notation too: - m.Get("places[0].latlng") + m.Get("places[0].latlng") Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - if m.Get("code").IsStr() { // Your code... } + if m.Get("code").IsStr() { // Your code... 
} Or you can just assume the type, and use one of the strong type methods to extract the real value: - m.Get("code").Int() + m.Get("code").Int() If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - Get("code").Int(-1) + Get("code").Int(-1) If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. -Reading data +# Reading data A simple example of how to use Objx: - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) -Ranging +# Ranging Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } */ package objx diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go index a64712a08b50..ab9f9ae67cbf 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go @@ -47,17 +47,16 @@ func New(data interface{}) Map { // // The arguments follow a key, value pattern. // -// // Returns nil if any key argument is non-string or if there are an odd number of arguments. // -// Example +// # Example // // To easily create Maps: // -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) // -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} func MSI(keyAndValuePairs ...interface{}) Map { newMap := Map{} keyAndValuePairsLen := len(keyAndValuePairs) diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d86c..4d4b4aad6fe8 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. 
- if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. - if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e2fa..000000000000 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3c89..000000000000 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c790b9..3ddab109ad9e 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. 
package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec53cc..a84e09bd4090 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba926c..0b7570f21c63 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. 
+ if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) 
} + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. +func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) + } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. 
+func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a758..861ed4b7ced0 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
} return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return !contains diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go index f4b42e44ffe9..213bde2ea636 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go @@ -18,6 +18,9 @@ import ( "github.com/stretchr/testify/assert" ) +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + // TestingT is an interface wrapper around *testing.T type TestingT interface { Logf(format string, args ...interface{}) @@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } -// Panic specifies if the functon call should fail and the panic message +// Panic specifies if the function call should fail and the panic message // // Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { @@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call { return c } -// Once indicates that that the mock should only return the value once. +// Once indicates that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } -// Twice indicates that that the mock should only return the value twice. +// Twice indicates that the mock should only return the value twice. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } -// Times indicates that that the mock should only return the indicated number +// Times indicates that the mock should only return the indicated number // of times. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) @@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree // With GCCGO we need to remove interface information starting from pN
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] + if gccgoRE.MatchString(functionPath) { + functionPath = gccgoRE.Split(functionPath, -1)[0] } parts := strings.Split(functionPath, ".") functionName := parts[len(parts)-1] @@ -474,7 +476,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { - // expected call found but it has already been called with repeatable times + // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -563,7 +565,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen Assertions */ -type assertExpectationser interface { +type assertExpectationiser interface { AssertExpectations(TestingT) bool } @@ -580,7 +582,7 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") obj = m } - m := obj.(assertExpectationser) + m := obj.(assertExpectationiser) if !m.AssertExpectations(t) { t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) return false @@ -592,6 +594,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { // AssertExpectations asserts that everything specified with On and Return was // in fact called as expected. Calls may have occurred in any order. func (m *Mock) AssertExpectations(t TestingT) bool { + if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -606,8 +611,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool { satisfied, reason := m.checkExpectation(expectedCall) if !satisfied { failedExpectations++ + t.Logf(reason) } - t.Logf(reason) } if failedExpectations != 0 { @@ -758,25 +763,33 @@ const ( Anything = "mock.Anything" ) -// AnythingOfTypeArgument is a string that contains the type of an argument +// AnythingOfTypeArgument contains the type of an argument +// for use when type checking. Used in Diff and Assert. +// +// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +type AnythingOfTypeArgument = anythingOfTypeArgument + +// anythingOfTypeArgument is a string that contains the type of an argument // for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string +type anythingOfTypeArgument string -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. +// AnythingOfType returns a special value containing the +// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String]. +// +// Used in Diff and Assert. // // For example: // // Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) + return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument // for use when type checking. 
This is an alternative to AnythingOfType. // Used in Diff and Assert. type IsTypeArgument struct { - t interface{} + t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. @@ -786,7 +799,7 @@ type IsTypeArgument struct { // For example: // Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { - return &IsTypeArgument{t: t} + return &IsTypeArgument{t: reflect.TypeOf(t)} } // FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument @@ -950,53 +963,55 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { - t := expected.(*IsTypeArgument).t - if reflect.TypeOf(t) != reflect.TypeOf(actual) { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { - t := expected.(*FunctionalOptionsArgument).value + } else { + switch expected := expected.(type) { + case anythingOfTypeArgument: + // type checking + if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + case *IsTypeArgument: + actualT := reflect.TypeOf(actual) + if actualT != expected.t { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) + } + case *FunctionalOptionsArgument: + t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() - } + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) - } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } + + default: + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, 
Anything) || assert.ObjectsAreEqual(actual, expected) { // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) } else { // not match differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) } } - } else { - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } } } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go index 63f852147675..506a82f80777 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). 
+// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330a43..eee8310a5fa9 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. 
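Aside, illustrative only and not part of the patch: the testify v1.9.0 surface updated above adds map support to Subset/NotSubset, introduces NotImplements, and deprecates mock.AnythingOfTypeArgument in favour of mock.AnythingOfType / mock.IsType. A minimal, hypothetical sketch of downstream test code exercising these (the type and method names are invented):

package example

import (
	"io"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// fakeStore is a hypothetical mock used only for this illustration.
type fakeStore struct{ mock.Mock }

func (f *fakeStore) Put(key string, n int) error {
	return f.Called(key, n).Error(0)
}

func TestUpdatedAssertions(t *testing.T) {
	// Subset/NotSubset now also accept maps: every key/value pair of the
	// second argument must (or must not) be present in the first.
	require.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})
	require.NotSubset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"z": 3})

	// NotImplements asserts that a value does not satisfy an interface.
	require.NotImplements(t, (*io.Reader)(nil), new(fakeStore))

	// Argument matchers: AnythingOfType matches by type name, IsType by reflect.Type.
	s := new(fakeStore)
	s.On("Put", mock.AnythingOfType("string"), mock.IsType(0)).Return(nil)
	require.NoError(t, s.Put("k", 42))
	s.AssertExpectations(t)
}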
package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go index 8b4202d8906d..18443a91c85d 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go @@ -58,7 +58,7 @@ func (suite *Suite) Require() *require.Assertions { suite.mu.Lock() defer suite.mu.Unlock() if suite.require == nil { - suite.require = require.New(suite.T()) + panic("'Require' must not be called before 'Run' or 'SetT'") } return suite.require } @@ -72,17 +72,19 @@ func (suite *Suite) Assert() *assert.Assertions { suite.mu.Lock() defer suite.mu.Unlock() if suite.Assertions == nil { - suite.Assertions = assert.New(suite.T()) + panic("'Assert' must not be called before 'Run' or 'SetT'") } return suite.Assertions } func recoverAndFailOnPanic(t *testing.T) { + t.Helper() r := recover() failOnPanic(t, r) } func failOnPanic(t *testing.T, r interface{}) { + t.Helper() if r != nil { t.Errorf("test panicked: %v\n%s", r, debug.Stack()) t.FailNow() @@ -96,19 +98,20 @@ func failOnPanic(t *testing.T, r interface{}) { func (suite *Suite) Run(name string, subtest func()) bool { oldT := suite.T() - if setupSubTest, ok := suite.s.(SetupSubTest); ok { - setupSubTest.SetupSubTest() - } + return oldT.Run(name, func(t *testing.T) { + suite.SetT(t) + defer suite.SetT(oldT) + + defer recoverAndFailOnPanic(t) + + if setupSubTest, ok := suite.s.(SetupSubTest); ok { + setupSubTest.SetupSubTest() + } - defer 
func() { - suite.SetT(oldT) if tearDownSubTest, ok := suite.s.(TearDownSubTest); ok { - tearDownSubTest.TearDownSubTest() + defer tearDownSubTest.TearDownSubTest() } - }() - return oldT.Run(name, func(t *testing.T) { - suite.SetT(t) subtest() }) } @@ -164,6 +167,8 @@ func Run(t *testing.T, suite TestingSuite) { suite.SetT(t) defer recoverAndFailOnPanic(t) defer func() { + t.Helper() + r := recover() if stats != nil { diff --git a/cluster-autoscaler/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/cluster-autoscaler/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae25885..c672ccf6986b 100644 --- a/cluster-autoscaler/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/cluster-autoscaler/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 
+224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. @@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e59780a2..7f602ffd26d4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
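Aside, not part of the patch: the DATA words added at consts<>+0xa0..0xb8 in the chacha20 ppc64le assembly above are byte-permutation masks loaded into V20/V21. As I read the change, the ChaCha20 quarter-round rotations by 16 and 8 bits are byte-aligned, so each VXOR+VRLW pair for those rotations can be fused into a single VPERMXOR, while the 12- and 7-bit rotations keep the VXOR+VRLW sequence. A rough Go illustration of why a byte-aligned rotate is just a byte shuffle (the value is arbitrary):

package example

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func demoByteAlignedRotate() {
	x := uint32(0x11223344)

	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], x)

	// Rotating left by 8 bits only moves whole bytes, so it can be expressed
	// as a permutation of the little-endian bytes; the same holds for 16 bits,
	// which is the property the new VPERMXOR masks exploit.
	rot8 := binary.LittleEndian.Uint32([]byte{b[3], b[0], b[1], b[2]})
	fmt.Printf("%#x %#x\n", bits.RotateLeft32(x, 8), rot8) // both print 0x22334411
}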
-//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c6155..27c41b6f0a13 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1520,6 +1520,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031d45d..6525c62f3c2f 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys 
SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6ace5..d8cb71db0a61 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82c30f..5c6035ddfa92 100644 --- a/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/cluster-autoscaler/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") 
procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, 
bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := 
syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/cover/profile.go b/cluster-autoscaler/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 000000000000..47a9a541164b --- /dev/null +++ b/cluster-autoscaler/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". 
+ // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. +// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. 
+ for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. 
+ return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions.go similarity index 80% rename from cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions_go121.go rename to cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions.go index cf4a7d0360f1..e16f6c33a523 100644 --- a/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions_go121.go +++ b/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions.go @@ -2,11 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.22 -// +build !go1.22 - package versions +// Note: If we use build tags to use go/versions when go >=1.22, +// we run into go.dev/issue/53737. Under some operations users would see an +// import of "go/versions" even if they would not compile the file. +// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include +// For this reason, this library just a clone of go/versions for the moment. + // Lang returns the Go language version for version x. // If x is not a valid version, Lang returns the empty string. // For example: diff --git a/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions_go122.go deleted file mode 100644 index c1c1814b28dd..000000000000 --- a/cluster-autoscaler/vendor/golang.org/x/tools/internal/versions/versions_go122.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/version" -) - -// Lang returns the Go language version for version x. -// If x is not a valid version, Lang returns the empty string. -// For example: -// -// Lang("go1.21rc2") = "go1.21" -// Lang("go1.21.2") = "go1.21" -// Lang("go1.21") = "go1.21" -// Lang("go1") = "go1" -// Lang("bad") = "" -// Lang("1.21") = "" -func Lang(x string) string { return version.Lang(x) } - -// Compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as Go versions. -// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". -// Invalid versions, including the empty string, compare less than -// valid versions and equal to each other. -// The language version "go1.21" compares less than the -// release candidate and eventual releases "go1.21rc1" and "go1.21.0". -// Custom toolchain suffixes are ignored during comparison: -// "go1.21.0" and "go1.21.0-bigcorp" are equal. -func Compare(x, y string) int { return version.Compare(x, y) } - -// IsValid reports whether the version x is valid. 
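Aside, not part of the patch: the newly vendored golang.org/x/tools/cover package shown above parses the output of `go test -coverprofile=cover.out`. A minimal, hypothetical sketch of how a consumer might use it (the file name and counts below are made up):

package example

import (
	"fmt"
	"strings"

	"golang.org/x/tools/cover"
)

func printCoverage() error {
	// A tiny, hand-written profile in the format described in profile.go:
	// "mode: <set|count|atomic>" followed by
	// "name.go:line.col,line.col numberOfStatements count" lines.
	const sample = `mode: set
example.com/pkg/file.go:10.2,12.16 3 1
example.com/pkg/file.go:15.2,15.9 1 0
`
	profiles, err := cover.ParseProfilesFromReader(strings.NewReader(sample))
	if err != nil {
		return err
	}
	for _, p := range profiles {
		covered, total := 0, 0
		for _, b := range p.Blocks {
			total += b.NumStmt
			if b.Count > 0 {
				covered += b.NumStmt
			}
		}
		fmt.Printf("%s: %d/%d statements covered\n", p.FileName, covered, total)
	}
	return nil
}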
-func IsValid(x string) bool { return version.IsValid(x) } diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 1d344e0509eb..16e48d3246ee 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -16,14 +16,13 @@ github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage -github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version # github.com/Azure/azure-sdk-for-go-extensions v0.1.6 ## explicit; go 1.19 github.com/Azure/azure-sdk-for-go-extensions/pkg/middleware -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -46,9 +45,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity +github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal # github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag @@ -58,13 +58,19 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 +# github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets +# github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0-beta.1 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.9.0-beta.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault 
v1.4.0 @@ -92,14 +98,14 @@ github.com/Azure/go-autorest ## explicit; go 1.15 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.9.23 +# github.com/Azure/go-autorest/autorest/adal v0.9.24 ## explicit; go 1.15 github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 -## explicit; go 1.12 +# github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 +## explicit; go 1.15 github.com/Azure/go-autorest/autorest/azure/auth -# github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 -## explicit; go 1.12 +# github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 +## explicit; go 1.15 github.com/Azure/go-autorest/autorest/azure/cli # github.com/Azure/go-autorest/autorest/date v0.3.0 ## explicit; go 1.12 @@ -122,7 +128,7 @@ github.com/Azure/go-autorest/tracing # github.com/Azure/skewer v0.0.14 ## explicit; go 1.13 github.com/Azure/skewer -# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -315,7 +321,7 @@ github.com/emicklei/go-restful/v3/log # github.com/euank/go-kmsg-parser v2.0.0+incompatible ## explicit github.com/euank/go-kmsg-parser/kmsgparser -# github.com/evanphx/json-patch v5.7.0+incompatible +# github.com/evanphx/json-patch v5.9.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/felixge/httpsnoop v1.0.4 @@ -364,7 +370,7 @@ github.com/gogo/protobuf/types # github.com/golang-jwt/jwt/v4 v4.5.0 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 -# github.com/golang-jwt/jwt/v5 v5.0.0 +# github.com/golang-jwt/jwt/v5 v5.2.1 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -505,7 +511,7 @@ github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore github.com/google/s2a-go/retry github.com/google/s2a-go/stream -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/googleapis/enterprise-certificate-proxy v0.2.3 @@ -599,8 +605,8 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.13.2 -## explicit; go 1.18 +# github.com/onsi/ginkgo/v2 v2.17.1 +## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -621,8 +627,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.30.0 -## explicit; go 1.18 +# github.com/onsi/gomega v1.32.0 +## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -671,7 +677,7 @@ 
github.com/opencontainers/runtime-spec/specs-go github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 +# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 github.com/pkg/browser # github.com/pkg/errors v0.9.1 @@ -722,11 +728,11 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.3.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/objx v0.5.0 -## explicit; go 1.12 -github.com/stretchr/objx -# github.com/stretchr/testify v1.8.4 +# github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 +github.com/stretchr/objx +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require @@ -896,7 +902,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.21.0 +# golang.org/x/crypto v0.22.0 ## explicit; go 1.18 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -918,7 +924,7 @@ golang.org/x/exp/slices golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.23.0 +# golang.org/x/net v0.24.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -944,11 +950,11 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.18.0 +# golang.org/x/sys v0.19.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 @@ -956,7 +962,7 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc -# golang.org/x/term v0.18.0 +# golang.org/x/term v0.19.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -993,9 +999,10 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.16.1 +# golang.org/x/tools v0.17.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer +golang.org/x/tools/cover golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata @@ -2302,7 +2309,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.29.0 +# sigs.k8s.io/cloud-provider-azure v1.29.4 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient @@ -2355,12 +2362,16 @@ sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine sigs.k8s.io/cloud-provider-azure/pkg/retry sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy +sigs.k8s.io/cloud-provider-azure/pkg/util/sets +sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints 
+sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient @@ -2397,8 +2408,8 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231205023417-1ba5a224ab0e -## explicit; go 1.20 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 +## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go index e533cf66965f..d2e654c85f1e 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go @@ -62,3 +62,7 @@ func GetAzCoreClientOption(armConfig *ARMClientConfig) (*policy.ClientOptions, e } return &azCoreClientConfig, nil } + +func IsMultiTenant(armConfig *ARMClientConfig) bool { + return armConfig != nil && armConfig.NetworkResourceTenantID != "" && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go new file mode 100644 index 000000000000..0af24a8bf94d --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go @@ -0,0 +1,59 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package armauth + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const ( + HeaderAuthorizationAuxiliary = "x-ms-authorization-auxiliary" +) + +type AuxiliaryAuthPolicy struct { + credentials []azcore.TokenCredential + scope string +} + +func NewAuxiliaryAuthPolicy(credentials []azcore.TokenCredential, scope string) *AuxiliaryAuthPolicy { + return &AuxiliaryAuthPolicy{ + credentials: credentials, + scope: scope, + } +} + +func (p *AuxiliaryAuthPolicy) Do(req *policy.Request) (*http.Response, error) { + tokens := make([]string, 0, len(p.credentials)) + + for _, cred := range p.credentials { + token, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{ + Scopes: []string{p.scope}, + }) + if err != nil { + return nil, err + } + tokens = append(tokens, fmt.Sprintf("Bearer %s", token.Token)) + } + req.Raw().Header.Set(HeaderAuthorizationAuxiliary, strings.Join(tokens, ", ")) + return req.Next() +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go new file mode 100644 index 000000000000..13352d729b33 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package armauth + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets" +) + +type KeyVaultCredential struct { + secretClient *azsecrets.Client + secretPath string + + token *azcore.AccessToken +} + +type KeyVaultCredentialSecret struct { + AccessToken string `json:"access_token"` + ExpiresOn time.Time `json:"expires_on"` +} + +func NewKeyVaultCredential( + msiCredential azcore.TokenCredential, + keyVaultURL string, + secretName string, +) (*KeyVaultCredential, error) { + cli, err := azsecrets.NewClient(keyVaultURL, msiCredential, nil) + if err != nil { + return nil, fmt.Errorf("create KeyVault client: %w", err) + } + + rv := &KeyVaultCredential{ + secretClient: cli, + secretPath: secretName, + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := rv.refreshToken(ctx); err != nil { + return nil, fmt.Errorf("refresh token: %w", err) + } + + return rv, nil +} + +func (c *KeyVaultCredential) refreshToken(ctx context.Context) error { + const LatestVersion = "" + + resp, err := c.secretClient.GetSecret(ctx, c.secretPath, LatestVersion, nil) + if err != nil { + return err + } + if resp.Value == nil { + return fmt.Errorf("secret value is nil") + } + + var secret KeyVaultCredentialSecret + if err := json.Unmarshal([]byte(*resp.Value), &secret); err != nil { + return fmt.Errorf("unmarshal secret value `%s`: %w", *resp.Value, err) + } + + c.token = &azcore.AccessToken{ + Token: secret.AccessToken, + ExpiresOn: secret.ExpiresOn, + } + + return nil +} + +func (c *KeyVaultCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + const RefreshTokenOffset = 5 * time.Minute + + if c.token != nil && c.token.ExpiresOn.Add(RefreshTokenOffset).Before(time.Now()) { + return *c.token, nil + } + + if err := c.refreshToken(ctx); err != nil { + return azcore.AccessToken{}, fmt.Errorf("refresh token: %w", err) + } + + return *c.token, nil +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go index 5c519c141167..df6316a16e18 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go @@ -24,15 +24,21 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth" ) type AuthProvider struct { - FederatedIdentityCredential azcore.TokenCredential - ManagedIdentityCredential azcore.TokenCredential - ClientSecretCredential azcore.TokenCredential + FederatedIdentityCredential azcore.TokenCredential + + ManagedIdentityCredential azcore.TokenCredential + ClientSecretCredential azcore.TokenCredential + ClientCertificateCredential azcore.TokenCredential + + NetworkTokenCredential azcore.TokenCredential NetworkClientSecretCredential azcore.TokenCredential - MultiTenantCredential azcore.TokenCredential - ClientCertificateCredential azcore.TokenCredential + + MultiTenantCredential azcore.TokenCredential } func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, clientOptionsMutFn ...func(option 
*policy.ClientOptions)) (*AuthProvider, error) { @@ -76,6 +82,20 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client } } + var ( + networkTokenCredential azcore.TokenCredential + ) + if config.UseManagedIdentityExtension && config.AuxiliaryTokenProvider != nil && IsMultiTenant(armConfig) { + networkTokenCredential, err = armauth.NewKeyVaultCredential( + managedIdentityCredential, + config.AuxiliaryTokenProvider.KeyVaultURL, + config.AuxiliaryTokenProvider.SecretName, + ) + if err != nil { + return nil, fmt.Errorf("create KeyVaultCredential for auxiliary token provider: %w", err) + } + } + // ClientSecretCredential is used for client secret var clientSecretCredential azcore.TokenCredential var networkClientSecretCredential azcore.TokenCredential @@ -88,7 +108,7 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client if err != nil { return nil, err } - if len(armConfig.NetworkResourceTenantID) > 0 && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) { + if IsMultiTenant(armConfig) { credOptions := &azidentity.ClientSecretCredentialOptions{ ClientOptions: *clientOption, } @@ -128,6 +148,20 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client if err != nil { return nil, err } + if IsMultiTenant(armConfig) { + networkClientSecretCredential, err = azidentity.NewClientCertificateCredential(armConfig.NetworkResourceTenantID, config.GetAADClientID(), certificate, privateKey, credOptions) + if err != nil { + return nil, err + } + credOptions = &azidentity.ClientCertificateCredentialOptions{ + ClientOptions: *clientOption, + AdditionallyAllowedTenants: []string{armConfig.NetworkResourceTenantID}, + } + multiTenantCredential, err = azidentity.NewClientCertificateCredential(armConfig.GetTenantID(), config.GetAADClientID(), certificate, privateKey, credOptions) + if err != nil { + return nil, err + } + } } return &AuthProvider{ @@ -136,6 +170,7 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client ClientSecretCredential: clientSecretCredential, ClientCertificateCredential: clientCertificateCredential, NetworkClientSecretCredential: networkClientSecretCredential, + NetworkTokenCredential: networkTokenCredential, MultiTenantCredential: multiTenantCredential, }, nil } @@ -159,6 +194,9 @@ func (factory *AuthProvider) GetNetworkAzIdentity() azcore.TokenCredential { if factory.NetworkClientSecretCredential != nil { return factory.NetworkClientSecretCredential } + if factory.NetworkTokenCredential != nil { + return factory.NetworkTokenCredential + } return nil } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go index 4deeb1bb2c1d..aa7e464f2e06 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go @@ -42,6 +42,14 @@ type AzureAuthConfig struct { AADFederatedTokenFile string `json:"aadFederatedTokenFile,omitempty" yaml:"aadFederatedTokenFile,omitempty"` // Use workload identity federation for the virtual machine to access Azure ARM APIs UseFederatedWorkloadIdentityExtension bool `json:"useFederatedWorkloadIdentityExtension,omitempty" yaml:"useFederatedWorkloadIdentityExtension,omitempty"` + // Auxiliary token provider for accessing resources from network tenant + // Require MSI to be enabled and have 
permission to access the KeyVault + AuxiliaryTokenProvider *AzureAuthAuxiliaryTokenProvider `json:"auxiliaryTokenProvider,omitempty" yaml:"auxiliaryTokenProvider,omitempty"` +} + +type AzureAuthAuxiliaryTokenProvider struct { + KeyVaultURL string `json:"keyVaultURL,omitempty" yaml:"keyVaultURL,omitempty"` + SecretName string `json:"secretName" yaml:"secretName"` } func (config *AzureAuthConfig) GetAADClientID() string { @@ -65,5 +73,5 @@ func (config *AzureAuthConfig) GetAzureFederatedTokenFile() (string, bool) { if clientCertPath := os.Getenv(utils.AzureFederatedTokenFile); clientCertPath != "" { return clientCertPath, true } - return config.AADClientCertPath, config.UseFederatedWorkloadIdentityExtension + return config.AADFederatedTokenFile, config.UseFederatedWorkloadIdentityExtension } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go index b88e8804957a..2e7d8832445d 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go @@ -77,6 +77,14 @@ func AzureCloudConfigFromURL(endpoint string) (*cloud.Configuration, error) { } if len(metadata) > 0 { + // We use the endpoint to build our config, but on ASH the config returned + // does not contain the endpoint, and this is not accounted for. This + // ultimately unsets it for the returned config, causing the bootstrap of + // the provider to fail. Instead, check if the endpoint is returned, and if + // it is not then set it. + if len(metadata[0].ResourceManager) == 0 { + metadata[0].ResourceManager = endpoint + } return &cloud.Configuration{ ActiveDirectoryAuthorityHost: metadata[0].Authentication.LoginEndpoint, Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go new file mode 100644 index 000000000000..e8d8af83409a --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go @@ -0,0 +1,45 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package diskclient + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" +) + +const PatchOperationName = "DisksClient.Patch" + +func (client *Client) Patch(ctx context.Context, resourceGroupName string, resourceName string, parameters armcompute.DiskUpdate) (result *armcompute.Disk, err error) { + ctx = utils.ContextWithClientName(ctx, "DisksClient") + ctx = utils.ContextWithRequestMethod(ctx, "Patch") + ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) + ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) + resp, err := utils.NewPollerWrapper(client.DisksClient.BeginUpdate(ctx, resourceGroupName, resourceName, parameters, nil)).WaitforPollerResp(ctx) + if err != nil { + return nil, err + } + if resp != nil { + return &resp.Disk, nil + } + return nil, nil +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go index af3d0a903283..9b9f85e10f64 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go @@ -18,6 +18,8 @@ limitations under the License. package diskclient import ( + "context" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -29,4 +31,5 @@ type Interface interface { utils.CreateOrUpdateFunc[armcompute.Disk] utils.DeleteFunc[armcompute.Disk] utils.ListFunc[armcompute.Disk] + Patch(ctx context.Context, resourceGroupName string, resourceName string, parameters armcompute.DiskUpdate) (result *armcompute.Disk, err error) } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go index d2b159e57948..f51b85dc8cf6 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go @@ -79,6 +79,7 @@ type ClientFactory interface { GetSecretClient() secretclient.Interface GetSecurityGroupClient() securitygroupclient.Interface GetSnapshotClient() snapshotclient.Interface + GetSnapshotClientForSub(subscriptionID string) (snapshotclient.Interface, error) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface GetSubnetClient() subnetclient.Interface GetVaultClient() vaultclient.Interface diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go index c1aed81d6efd..1c2f36ee191b 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go @@ -86,7 +86,7 @@ type ClientFactoryImpl struct { routetableclientInterface routetableclient.Interface secretclientInterface secretclient.Interface securitygroupclientInterface securitygroupclient.Interface - 
snapshotclientInterface snapshotclient.Interface + snapshotclientInterface sync.Map sshpublickeyresourceclientInterface sshpublickeyresourceclient.Interface subnetclientInterface subnetclient.Interface vaultclientInterface vaultclient.Interface @@ -247,7 +247,7 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c } //initialize snapshotclient - factory.snapshotclientInterface, err = factory.createSnapshotClient(config.SubscriptionID) + _, err = factory.GetSnapshotClientForSub(config.SubscriptionID) if err != nil { return nil, err } @@ -888,7 +888,24 @@ func (factory *ClientFactoryImpl) createSnapshotClient(subscription string) (sna } func (factory *ClientFactoryImpl) GetSnapshotClient() snapshotclient.Interface { - return factory.snapshotclientInterface + clientImp, _ := factory.snapshotclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(snapshotclient.Interface) +} +func (factory *ClientFactoryImpl) GetSnapshotClientForSub(subscriptionID string) (snapshotclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.snapshotclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(snapshotclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. + clientImp, err := factory.createSnapshotClient(subscriptionID) + if err != nil { + return nil, err + } + factory.snapshotclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(snapshotclient.Interface), nil } func (factory *ClientFactoryImpl) createSSHPublicKeyResourceClient(subscription string) (sshpublickeyresourceclient.Interface, error) { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go index 763b5b338923..8e0445440fc6 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go @@ -482,6 +482,21 @@ func (mr *MockClientFactoryMockRecorder) GetSnapshotClient() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClient", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClient)) } +// GetSnapshotClientForSub mocks base method. +func (m *MockClientFactory) GetSnapshotClientForSub(arg0 string) (snapshotclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnapshotClientForSub", arg0) + ret0, _ := ret[0].(snapshotclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSnapshotClientForSub indicates an expected call of GetSnapshotClientForSub. +func (mr *MockClientFactoryMockRecorder) GetSnapshotClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClientForSub), arg0) +} + // GetSubnetClient mocks base method. 
func (m *MockClientFactory) GetSubnetClient() subnetclient.Interface { m.ctrl.T.Helper() diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go index 1cae1fb8074d..b6452e6c31f5 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit +// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit,crossSubFactory=true type Interface interface { utils.GetFunc[armcompute.Snapshot] utils.CreateOrUpdateFunc[armcompute.Snapshot] diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go index 435a480380d4..8f7d9c21b0cb 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go @@ -56,7 +56,8 @@ func GetDefaultAzCoreClientOption() policy.ClientOptions { Transport: &http.Client{ Transport: DefaultTransport, }, - TracingProvider: TracingProvider, - Cloud: cloud.AzurePublic, + TracingProvider: TracingProvider, + Cloud: cloud.AzurePublic, + InsecureAllowCredentialWithHTTP: true, } } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go index bc2523188018..67ddcedc98c9 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go @@ -35,13 +35,16 @@ func init() { Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - MaxConnsPerHost: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + MaxConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, // the same as default transport + ResponseHeaderTimeout: 60 * time.Second, TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: tls.VersionTLS12, + Renegotiation: tls.RenegotiateNever, // the same as default transport https://pkg.go.dev/crypto/tls#RenegotiationSupport }, } }) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go index 611a5ef5bc4a..b8ae4eebee5a 100644 --- 
a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go @@ -34,5 +34,6 @@ type Interface interface { utils.ListFunc[armcompute.VirtualMachine] InstanceView(ctx context.Context, resourceGroupName string, vmName string) (*armcompute.VirtualMachineInstanceView, error) ListVMInstanceView(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) - BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse], error) + BeginAttachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.AttachDetachDataDisksRequest, options *armcompute.VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[armcompute.VirtualMachinesClientAttachDetachDataDisksResponse], error) + BeginUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachineUpdate, options *armcompute.VirtualMachinesClientBeginUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientUpdateResponse], error) } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go index 073ce78ef059..a899d1680e84 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go @@ -121,7 +121,7 @@ func DoHackRegionalRetryForGET(c *Client) autorest.SendDecorator { bodyString := string(bodyBytes) trimmed := strings.TrimSpace(bodyString) - klog.V(6).Infof("Send.sendRequest got response with ContentLength %d, StatusCode %d and responseBody length %d", response.ContentLength, response.StatusCode, len(trimmed)) + klog.V(6).Infof("%s %s got response with ContentLength %d, StatusCode %d and responseBody length %d", request.Method, request.URL.Path, response.ContentLength, response.StatusCode, len(trimmed)) // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow // Empty content and 2xx http status code are returned in this case. 
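The virtualmachineclient interface changes a few hunks above now expose BeginAttachDetachDataDisks and BeginUpdate, which hand back azcore pollers rather than completed responses, so callers are expected to drive the long-running operation themselves. A minimal caller sketch, assuming a hypothetical updateVMTags helper and placeholder tag values (none of this appears in the diff itself):

package example // hypothetical example, not part of the vendored code

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient"
)

// updateVMTags starts a VM update through the new BeginUpdate method and
// waits for the long-running operation to reach a terminal state.
func updateVMTags(ctx context.Context, vmClient virtualmachineclient.Interface, resourceGroup, vmName string) error {
	update := armcompute.VirtualMachineUpdate{
		// Placeholder tag purely for illustration.
		Tags: map[string]*string{"managed-by": to.Ptr("cluster-autoscaler")},
	}
	poller, err := vmClient.BeginUpdate(ctx, resourceGroup, vmName, update, nil)
	if err != nil {
		return err
	}
	// PollUntilDone blocks until the update succeeds, fails, or is canceled.
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}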
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go index 4ad702caa019..6aba901b1f3a 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go @@ -509,7 +509,48 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM } responses := c.armClient.PutResourcesInBatches(ctx, resources, batchSize) - errors := make([]*retry.Error, 0) + errors, retryIDs := c.parseResp(ctx, responses, true) + if len(retryIDs) > 0 { + retryResources := make(map[string]interface{}) + for _, id := range retryIDs { + retryResources[id] = resources[id] + } + resps := c.armClient.PutResourcesInBatches(ctx, retryResources, batchSize) + errs, _ := c.parseResp(ctx, resps, false) + errors = append(errors, errs...) + } + + // Aggregate errors. + if len(errors) > 0 { + rerr := &retry.Error{} + errs := make([]error, 0) + for _, err := range errors { + if !err.Retriable && strings.Contains(err.Error().Error(), consts.ConcurrentRequestConflictMessage) { + err.Retriable = true + err.RetryAfter = time.Now().Add(5 * time.Second) + } + + if err.IsThrottled() && err.RetryAfter.After(rerr.RetryAfter) { + rerr.RetryAfter = err.RetryAfter + } + errs = append(errs, err.Error()) + } + rerr.RawError = utilerrors.Flatten(utilerrors.NewAggregate(errs)) + return rerr + } + + return nil +} + +func (c *Client) parseResp( + ctx context.Context, + responses map[string]*armclient.PutResourcesResponse, + shouldRetry bool, +) ([]*retry.Error, []string) { + var ( + errors []*retry.Error + retryIDs []string + ) for resourceID, resp := range responses { if resp == nil { continue @@ -534,6 +575,19 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM continue } + if retry.IsSuccessHTTPResponse(resp.Response) && + strings.Contains( + strings.ToLower(errMsg), + strings.ToLower(consts.OperationPreemptedErrorMessage), + ) { + if shouldRetry { + klog.V(2).Infof("The operation on VM %s is preempted, will retry.", resourceID) + retryIDs = append(retryIDs, resourceID) + continue + } + klog.V(2).Infof("The operation on VM %s is preempted, will not retry.", resourceID) + } + errors = append(errors, resp.Error) continue } @@ -546,25 +600,5 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM } } } - - // Aggregate errors. 
- if len(errors) > 0 { - rerr := &retry.Error{} - errs := make([]error, 0) - for _, err := range errors { - if !err.Retriable && strings.Contains(err.Error().Error(), consts.ConcurrentRequestConflictMessage) { - err.Retriable = true - err.RetryAfter = time.Now().Add(5 * time.Second) - } - - if err.IsThrottled() && err.RetryAfter.After(rerr.RetryAfter) { - rerr.RetryAfter = err.RetryAfter - } - errs = append(errs, err.Error()) - } - rerr.RawError = utilerrors.Flatten(utilerrors.NewAggregate(errs)) - return rerr - } - - return nil + return errors, retryIDs } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go index 3b24486586d2..312ebc361d06 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go @@ -113,8 +113,8 @@ const ( // StrRawVersion is the raw version string StrRawVersion string = "raw" - // VirtualMachineScaleSetsDeallocating indicates VMSS instances are in Deallocating state. - VirtualMachineScaleSetsDeallocating = "Deallocating" + // ProvisionStateDeleting indicates VMSS instances are in Deleting state. + ProvisionStateDeleting = "Deleting" // VmssMachineIDTemplate is the vmss manchine ID template VmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s" // VMSetCIDRIPV4TagKey specifies the node ipv4 CIDR mask of the instances on the VMSS or VMAS @@ -132,6 +132,8 @@ const ( ProvisioningStateDeleting = "Deleting" // ProvisioningStateSucceeded ... ProvisioningStateSucceeded = "Succeeded" + // ProvisioningStateUnknown is the unknown provisioning state + ProvisioningStateUnknown = "Unknown" ) // cache @@ -409,8 +411,8 @@ const ( CannotUpdateVMBeingDeletedMessagePrefix = "'Put on Virtual Machine Scale Set VM Instance' is not allowed on Virtual Machine Scale Set" // CannotUpdateVMBeingDeletedMessageSuffix is the suffix of the error message that the request failed due to delete a VM that is being deleted CannotUpdateVMBeingDeletedMessageSuffix = "since it is marked for deletion" - // OperationPreemptedErrorCode is the error code returned for vm operation preempted errors - OperationPreemptedErrorCode = "OperationPreempted" + // OperationPreemptedErrorMessage is the error message returned for vm operation preempted errors + OperationPreemptedErrorMessage = "Operation execution has been preempted by a more recent operation" ) // node ipam controller @@ -572,3 +574,13 @@ const ( ClusterServiceLoadBalancerHealthProbeDefaultPath = "/healthz" SharedProbeName = "cluster-service-shared-health-probe" ) + +// VM power state +const ( + VMPowerStatePrefix = "PowerState/" + VMPowerStateStopped = "stopped" + VMPowerStateStopping = "stopping" + VMPowerStateDeallocated = "deallocated" + VMPowerStateDeallocating = "deallocating" + VMPowerStateUnknown = "unknown" +) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go index 70acb3066948..b80c29fb7ef7 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go @@ -60,6 +60,8 @@ type operationCallMetrics struct { type MetricContext struct { start time.Time attributes 
[]string + // log level in ObserveOperationWithResult + LogLevel int32 } // NewMetricContext creates a new MetricContext. @@ -67,6 +69,7 @@ func NewMetricContext(prefix, request, resourceGroup, subscriptionID, source str return &MetricContext{ start: time.Now(), attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source}, + LogLevel: 3, } } @@ -104,7 +107,7 @@ func (mc *MetricContext) ObserveOperationWithResult(isOperationSucceeded bool, l } mc.CountFailedOperation() } - mc.logLatency(3, latency, append(labelAndValues, "result_code", resultCode)...) + mc.logLatency(mc.LogLevel, latency, append(labelAndValues, "result_code", resultCode)...) } func (mc *MetricContext) logLatency(logLevel int32, latency float64, additionalKeysAndValues ...interface{}) { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index 28fc17a9e710..97b0caec7c90 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -31,10 +31,10 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" @@ -48,7 +48,6 @@ import ( cloudnodeutil "k8s.io/cloud-provider/node/helpers" nodeutil "k8s.io/component-helpers/node/util" "k8s.io/klog/v2" - "sigs.k8s.io/yaml" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader" @@ -76,10 +75,14 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient" + + "sigs.k8s.io/yaml" + azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" ratelimitconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" "sigs.k8s.io/cloud-provider-azure/pkg/util/taints" ) @@ -331,11 +334,11 @@ type MultipleStandardLoadBalancerConfigurationSpec struct { // MultipleStandardLoadBalancerConfigurationStatus stores the properties regarding multiple standard load balancers. type MultipleStandardLoadBalancerConfigurationStatus struct { // ActiveServices stores the services that are supposed to use the load balancer. - ActiveServices sets.Set[string] `json:"activeServices" yaml:"activeServices"` + ActiveServices *utilsets.IgnoreCaseSet `json:"activeServices" yaml:"activeServices"` // ActiveNodes stores the nodes that are supposed to be in the load balancer. // It will be used in EnsureHostsInPool to make sure the given ones are in the backend pool. - ActiveNodes sets.Set[string] `json:"activeNodes" yaml:"activeNodes"` + ActiveNodes *utilsets.IgnoreCaseSet `json:"activeNodes" yaml:"activeNodes"` } // HasExtendedLocation returns true if extendedlocation prop are specified. @@ -392,17 +395,17 @@ type Cloud struct { // Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes. 
nodeCachesLock sync.RWMutex // nodeNames holds current nodes for tracking added nodes in VM caches. - nodeNames sets.Set[string] + nodeNames *utilsets.IgnoreCaseSet // nodeZones is a mapping from Zone to a sets.Set[string] of Node's names in the Zone // it is updated by the nodeInformer - nodeZones map[string]sets.Set[string] + nodeZones map[string]*utilsets.IgnoreCaseSet // nodeResourceGroups holds nodes external resource groups nodeResourceGroups map[string]string // unmanagedNodes holds a list of nodes not managed by Azure cloud provider. - unmanagedNodes sets.Set[string] + unmanagedNodes *utilsets.IgnoreCaseSet // excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer. - excludeLoadBalancerNodes sets.Set[string] - nodePrivateIPs map[string]sets.Set[string] + excludeLoadBalancerNodes *utilsets.IgnoreCaseSet + nodePrivateIPs map[string]*utilsets.IgnoreCaseSet nodePrivateIPToNodeNameMap map[string]string // nodeInformerSynced is for determining if the informer has synced. nodeInformerSynced cache.InformerSynced @@ -455,13 +458,13 @@ type Cloud struct { // NewCloud returns a Cloud with initialized clients func NewCloud(ctx context.Context, config *Config, callFromCCM bool) (cloudprovider.Interface, error) { az := &Cloud{ - nodeNames: sets.New[string](), - nodeZones: map[string]sets.Set[string]{}, + nodeNames: utilsets.NewString(), + nodeZones: map[string]*utilsets.IgnoreCaseSet{}, nodeResourceGroups: map[string]string{}, - unmanagedNodes: sets.New[string](), + unmanagedNodes: utilsets.NewString(), routeCIDRs: map[string]string{}, - excludeLoadBalancerNodes: sets.New[string](), - nodePrivateIPs: map[string]sets.Set[string]{}, + excludeLoadBalancerNodes: utilsets.NewString(), + nodePrivateIPs: map[string]*utilsets.IgnoreCaseSet{}, nodePrivateIPToNodeNameMap: map[string]string{}, } @@ -565,7 +568,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, // The default cloud config type is cloudConfigTypeMerge. 
config.CloudConfigType = configloader.CloudConfigTypeMerge } else { - supportedCloudConfigTypes := sets.New( + supportedCloudConfigTypes := utilsets.NewString( string(configloader.CloudConfigTypeMerge), string(configloader.CloudConfigTypeFile), string(configloader.CloudConfigTypeSecret)) @@ -579,7 +582,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, strings.EqualFold(config.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypePODIP) { config.LoadBalancerBackendPoolConfigurationType = consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration } else { - supportedLoadBalancerBackendPoolConfigurationTypes := sets.New( + supportedLoadBalancerBackendPoolConfigurationTypes := utilsets.NewString( strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration), strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypeNodeIP), strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypePODIP)) @@ -591,7 +594,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, if config.ClusterServiceLoadBalancerHealthProbeMode == "" { config.ClusterServiceLoadBalancerHealthProbeMode = consts.ClusterServiceLoadBalancerHealthProbeModeServiceNodePort } else { - supportedClusterServiceLoadBalancerHealthProbeModes := sets.New( + supportedClusterServiceLoadBalancerHealthProbeModes := utilsets.NewString( strings.ToLower(consts.ClusterServiceLoadBalancerHealthProbeModeServiceNodePort), strings.ToLower(consts.ClusterServiceLoadBalancerHealthProbeModeShared), ) @@ -782,8 +785,8 @@ func (az *Cloud) checkEnableMultipleStandardLoadBalancers() error { return fmt.Errorf("multiple standard load balancers cannot be used with backend pool type %s", consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) } - names := sets.New[string]() - primaryVMSets := sets.New[string]() + names := utilsets.NewString() + primaryVMSets := utilsets.NewString() for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { if names.Has(multiSLBConfig.Name) { return fmt.Errorf("duplicated multiple standard load balancer configuration name %s", multiSLBConfig.Name) @@ -1257,15 +1260,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { if newNode != nil { // Add to nodeNames cache. - az.nodeNames.Insert(newNode.ObjectMeta.Name) + az.nodeNames = utilsets.SafeInsert(az.nodeNames, newNode.ObjectMeta.Name) // Add to nodeZones cache. newZone, ok := newNode.ObjectMeta.Labels[v1.LabelTopologyZone] if ok && az.isAvailabilityZone(newZone) { - if az.nodeZones[newZone] == nil { - az.nodeZones[newZone] = sets.New[string]() - } - az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) + az.nodeZones[newZone] = utilsets.SafeInsert(az.nodeZones[newZone], newNode.ObjectMeta.Name) } // Add to nodeResourceGroups cache. 
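The hunks above replace k8s.io/apimachinery sets.Set[string] with the new *utilsets.IgnoreCaseSet from pkg/util/sets: utilsets.SafeInsert allocates the set on first use and returns it, and, judging by the removal of the strings.ToLower calls around Has and Insert, lookups match case-insensitively. A small sketch of the pattern, using only the calls that appear in these hunks (the zone names are made up for illustration):

package example // illustrative only; mirrors the utilsets calls used above

import (
	"fmt"

	utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets"
)

func demoIgnoreCaseSet() {
	var zones *utilsets.IgnoreCaseSet // nil until the first SafeInsert

	// SafeInsert replaces the old "if set == nil { set = sets.New[string]() }" dance.
	zones = utilsets.SafeInsert(zones, "EastUS-1")
	zones = utilsets.SafeInsert(zones, "eastus-2")

	// Callers no longer lower-case keys before Has/Delete.
	fmt.Println(zones.Has("eastus-1"), zones.Len()) // expected: true 2 (case-insensitive match)
	fmt.Println(zones.UnsortedList())
}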
@@ -1301,15 +1301,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { // Add to nodePrivateIPs cache for _, address := range getNodePrivateIPAddresses(newNode) { - if az.nodePrivateIPs[strings.ToLower(newNode.Name)] == nil { - az.nodePrivateIPs[strings.ToLower(newNode.Name)] = sets.New[string]() - } if az.nodePrivateIPToNodeNameMap == nil { az.nodePrivateIPToNodeNameMap = make(map[string]string) } klog.V(6).Infof("adding IP address %s of the node %s", address, newNode.Name) - az.nodePrivateIPs[strings.ToLower(newNode.Name)].Insert(address) + az.nodePrivateIPs[strings.ToLower(newNode.Name)] = utilsets.SafeInsert(az.nodePrivateIPs[strings.ToLower(newNode.Name)], address) az.nodePrivateIPToNodeNameMap[address] = newNode.Name } } @@ -1345,7 +1342,7 @@ func (az *Cloud) updateNodeTaint(node *v1.Node) { } // GetActiveZones returns all the zones in which k8s nodes are currently running. -func (az *Cloud) GetActiveZones() (sets.Set[string], error) { +func (az *Cloud) GetActiveZones() (*utilsets.IgnoreCaseSet, error) { if az.nodeInformerSynced == nil { return nil, fmt.Errorf("azure cloud provider doesn't have informers set") } @@ -1356,9 +1353,9 @@ func (az *Cloud) GetActiveZones() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones") } - zones := sets.New[string]() + zones := utilsets.NewString() for zone, nodes := range az.nodeZones { - if len(nodes) > 0 { + if nodes.Len() > 0 { zones.Insert(zone) } } @@ -1393,7 +1390,7 @@ func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) { } // GetNodeNames returns a set of all node names in the k8s cluster. -func (az *Cloud) GetNodeNames() (sets.Set[string], error) { +func (az *Cloud) GetNodeNames() (*utilsets.IgnoreCaseSet, error) { // Kubelet won't set az.nodeInformerSynced, return nil. if az.nodeInformerSynced == nil { return nil, nil @@ -1405,14 +1402,14 @@ func (az *Cloud) GetNodeNames() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetNodeNames") } - return sets.New(az.nodeNames.UnsortedList()...), nil + return utilsets.NewString(az.nodeNames.UnsortedList()...), nil } // GetResourceGroups returns a set of resource groups that all nodes are running on. -func (az *Cloud) GetResourceGroups() (sets.Set[string], error) { +func (az *Cloud) GetResourceGroups() (*utilsets.IgnoreCaseSet, error) { // Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup. if az.nodeInformerSynced == nil { - return sets.New(az.ResourceGroup), nil + return utilsets.NewString(az.ResourceGroup), nil } az.nodeCachesLock.RLock() @@ -1421,7 +1418,7 @@ func (az *Cloud) GetResourceGroups() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups") } - resourceGroups := sets.New(az.ResourceGroup) + resourceGroups := utilsets.NewString(az.ResourceGroup) for _, rg := range az.nodeResourceGroups { resourceGroups.Insert(rg) } @@ -1430,7 +1427,7 @@ func (az *Cloud) GetResourceGroups() (sets.Set[string], error) { } // GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes). -func (az *Cloud) GetUnmanagedNodes() (sets.Set[string], error) { +func (az *Cloud) GetUnmanagedNodes() (*utilsets.IgnoreCaseSet, error) { // Kubelet won't set az.nodeInformerSynced, always return nil. 
if az.nodeInformerSynced == nil { return nil, nil @@ -1442,7 +1439,7 @@ func (az *Cloud) GetUnmanagedNodes() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes") } - return sets.New(az.unmanagedNodes.UnsortedList()...), nil + return utilsets.NewString(az.unmanagedNodes.UnsortedList()...), nil } // ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers". @@ -1466,7 +1463,7 @@ func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, erro return az.excludeLoadBalancerNodes.Has(nodeName), nil } -func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) sets.Set[string] { +func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) *utilsets.IgnoreCaseSet { az.multipleStandardLoadBalancersActiveNodesLock.Lock() defer az.multipleStandardLoadBalancersActiveNodesLock.Unlock() @@ -1476,7 +1473,7 @@ func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) sets.Set[string } } - return sets.New[string]() + return utilsets.NewString() } func isNodeReady(node *v1.Node) bool { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go index 48b0989d9566..a1eb4e9853d4 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go @@ -153,7 +153,7 @@ func (as *availabilitySet) WaitForUpdateResult(ctx context.Context, future *azur } // DetachDisk detaches a disk from VM -func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault) if err != nil { // if host doesn't exist, no need to detach @@ -179,6 +179,9 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa // found the disk klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) disks[i].ToBeDetached = pointer.Bool(true) + if forceDetach { + disks[i].DetachOption = compute.ForceDetach + } bFoundDisk = true } } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 064961a4738a..28669c0db9b0 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -163,7 +163,7 @@ func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Futur } // DetachDisk detaches a disk from VM -func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -193,6 +193,9 @@ func (ss *ScaleSet) DetachDisk(ctx 
context.Context, nodeName types.NodeName, dis // found the disk klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) disks[i].ToBeDetached = pointer.Bool(true) + if forceDetach { + disks[i].DetachOption = compute.ForceDetach + } bFoundDisk = true } } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go index 635292887521..97ba16690a3c 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go @@ -122,7 +122,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, } // DetachDisk detaches a disk from VM -func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { vmName := mapNodeNameToVMName(nodeName) vm, err := fs.getVmssFlexVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -148,6 +148,9 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, // found the disk klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) disks[i].ToBeDetached = pointer.Bool(true) + if forceDetach { + disks[i].DetachOption = compute.ForceDetach + } bFoundDisk = true } } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go index 451609d3ed16..60d6ab70f20f 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go @@ -25,7 +25,6 @@ import ( "go.uber.org/mock/gomock" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" @@ -42,6 +41,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) // NewTestScaleSet creates a fake ScaleSet for unit test @@ -96,12 +96,12 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { VMType: consts.VMTypeStandard, LoadBalancerBackendPoolConfigurationType: consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration, }, - nodeZones: map[string]sets.Set[string]{}, + nodeZones: map[string]*utilsets.IgnoreCaseSet{}, nodeInformerSynced: func() bool { return true }, nodeResourceGroups: map[string]string{}, - unmanagedNodes: sets.New[string](), - excludeLoadBalancerNodes: sets.New[string](), - nodePrivateIPs: map[string]sets.Set[string]{}, + unmanagedNodes: utilsets.NewString(), + excludeLoadBalancerNodes: utilsets.NewString(), + nodePrivateIPs: map[string]*utilsets.IgnoreCaseSet{}, routeCIDRs: map[string]string{}, eventRecorder: &record.FakeRecorder{}, lockMap: newLockMap(), diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go index 
e8a4a76f4bb3..2e5c9ebbfebb 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go @@ -35,12 +35,6 @@ import ( var _ cloudprovider.Instances = (*Cloud)(nil) const ( - vmPowerStatePrefix = "PowerState/" - vmPowerStateStopped = "stopped" - vmPowerStateDeallocated = "deallocated" - vmPowerStateDeallocating = "deallocating" - vmPowerStateUnknown = "unknown" - // nodeNameEnvironmentName is the environment variable name for getting node name. // It is only used for out-of-tree cloud provider. nodeNameEnvironmentName = "NODE_NAME" @@ -266,7 +260,7 @@ func (az *Cloud) InstanceShutdownByProviderID(_ context.Context, providerID stri status := strings.ToLower(powerStatus) provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(consts.ProvisioningStateSucceeded))) - return provisioningSucceeded && (status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating), nil + return provisioningSucceeded && (status == consts.VMPowerStateStopped || status == consts.VMPowerStateDeallocated || status == consts.VMPowerStateDeallocating), nil } func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index 1649ee008ddf..5d25ce124ea5 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -36,7 +36,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/klog/v2" @@ -50,6 +49,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) var _ cloudprovider.LoadBalancer = (*Cloud)(nil) @@ -596,12 +596,10 @@ func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vm trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name, ) { - if az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes != nil { - for nodeName := range az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes { - az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Delete(strings.ToLower(nodeName)) - } + for _, nodeName := range az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes.UnsortedList() { + az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Delete(nodeName) } - az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = sets.New[string]() + az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = utilsets.NewString() break } } @@ -1599,10 +1597,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), 
az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() - if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() - } - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Insert(strings.ToLower(svcName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, svcName) az.multipleStandardLoadBalancersActiveServicesLock.Unlock() klog.V(2).Infof("reconcileMultipleStandardLoadBalancerConfigurations: service(%s) is active on lb(%s)", svcName, lbName) } @@ -1918,9 +1913,9 @@ func (az *Cloud) removeNodeFromLBConfig(nodeNameToLBConfigIDXMap map[string]int, // removeDeletedNodesFromLoadBalancerConfigurations removes the deleted nodes // that do not exist in nodes list from the load balancer configurations func (az *Cloud) removeDeletedNodesFromLoadBalancerConfigurations(nodes []*v1.Node) map[string]int { - nodeNamesSet := sets.New[string]() + nodeNamesSet := utilsets.NewString() for _, node := range nodes { - nodeNamesSet.Insert(strings.ToLower(node.Name)) + nodeNamesSet.Insert(node.Name) } az.multipleStandardLoadBalancersActiveNodesLock.Lock() @@ -1930,12 +1925,12 @@ func (az *Cloud) removeDeletedNodesFromLoadBalancerConfigurations(nodes []*v1.No nodeNameToLBConfigIDXMap := make(map[string]int) for i, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { if multiSLBConfig.ActiveNodes != nil { - for nodeName := range multiSLBConfig.ActiveNodes { + for _, nodeName := range multiSLBConfig.ActiveNodes.UnsortedList() { if nodeNamesSet.Has(nodeName) { nodeNameToLBConfigIDXMap[nodeName] = i } else { klog.V(4).Infof("reconcileMultipleStandardLoadBalancerBackendNodes: node(%s) is gone, remove it from lb(%s)", nodeName, multiSLBConfig.Name) - az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, _ = safeRemoveKeyFromStringsSet(az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, strings.ToLower(nodeName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes.Delete(nodeName) } } } @@ -1971,14 +1966,14 @@ func (az *Cloud) accommodateNodesByPrimaryVMSet( continue } - az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Store(strings.ToLower(node.Name), sets.Empty{}) + az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Store(strings.ToLower(node.Name), struct{}{}) if !multiSLBConfig.ActiveNodes.Has(node.Name) { klog.V(4).Infof("accommodateNodesByPrimaryVMSet: node(%s) should be on lb(%s) because of primary vmSet (%s)", node.Name, multiSLBConfig.Name, vmSetName) az.removeNodeFromLBConfig(nodeNameToLBConfigIDXMap, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Lock() - az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = safeAddKeyToStringsSet(az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, strings.ToLower(node.Name)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Unlock() } break @@ -2077,7 +2072,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( klog.V(4).Infof("accommodateNodesByNodeSelector: node(%s) should be on lb(%s) it is the eligible LB with fewest number of nodes", node.Name, az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].Name) az.multipleStandardLoadBalancersActiveNodesLock.Lock() - 
az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes = safeAddKeyToStringsSet(az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes, strings.ToLower(node.Name)) + az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Unlock() } @@ -2139,16 +2134,13 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold(lbName, az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() - if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() - } if wantLb { klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is active on lb(%s)", svcName, lbName) - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Insert(strings.ToLower(svcName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, svcName) } else { klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is not active on lb(%s) any more", svcName, lbName) - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, _ = safeRemoveKeyFromStringsSet(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, strings.ToLower(svcName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Delete(svcName) } az.multipleStandardLoadBalancersActiveServicesLock.Unlock() break @@ -2864,6 +2856,13 @@ func (az *Cloud) reconcileSecurityGroup( consts.ServiceAnnotationAllowedIPRanges, consts.ServiceAnnotationAllowedServiceTags, )) } + + if len(accessControl.InvalidRanges) > 0 { + az.Event(service, v1.EventTypeWarning, "InvalidConfiguration", fmt.Sprintf( + "Found invalid LoadBalancerSourceRanges %v, ignoring and adding a default DenyAll rule in security group.", + accessControl.InvalidRanges, + )) + } } var ( @@ -3298,7 +3297,7 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string // Check whether there are still load balancer rules referring to it. if len(referencedLBRules) > 0 { - referencedLBRuleIDs := sets.New[string]() + referencedLBRuleIDs := utilsets.NewString() for _, refer := range referencedLBRules { referencedLBRuleIDs.Insert(pointer.StringDeref(refer.ID, "")) } @@ -3864,9 +3863,9 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri // If there is no annotation given, it selects all LBs. lbsFromAnnotation := consts.GetLoadBalancerConfigurationsNames(service) if len(lbsFromAnnotation) > 0 { - lbNamesSet := sets.New[string](lbsFromAnnotation...) + lbNamesSet := utilsets.NewString(lbsFromAnnotation...) 
for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { - if lbNamesSet.Has(strings.ToLower(multiSLBConfig.Name)) { + if lbNamesSet.Has(multiSLBConfig.Name) { logger.V(4).Info("selects the load balancer by annotation", "load balancer configuration name", multiSLBConfig.Name) eligibleLBs = append(eligibleLBs, multiSLBConfig) @@ -4002,10 +4001,7 @@ func (az *Cloud) isLoadBalancerInUseByService(service *v1.Service, lbConfig Mult defer az.multipleStandardLoadBalancersActiveServicesLock.Unlock() serviceName := getServiceName(service) - if lbConfig.ActiveServices != nil { - return lbConfig.ActiveServices.Has(serviceName) - } - return false + return lbConfig.ActiveServices.Has(serviceName) } // There are two cases when a service owns the frontend IP config: diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go index 67f213c320b1..b33ee2e71c1a 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go @@ -26,7 +26,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -35,6 +34,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) type BackendPool interface { @@ -304,8 +304,8 @@ func getBackendIPConfigurationsToBeDeleted( return []network.InterfaceIPConfiguration{} } - bipConfigNotFoundIDSet := sets.New[string]() - bipConfigExcludeIDSet := sets.New[string]() + bipConfigNotFoundIDSet := utilsets.NewString() + bipConfigExcludeIDSet := utilsets.NewString() for _, ipConfig := range bipConfigNotFound { bipConfigNotFoundIDSet.Insert(pointer.StringDeref(ipConfig.ID, "")) } @@ -344,7 +344,7 @@ func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(clusterName string, return nil, nil } - backendPrivateIPv4s, backendPrivateIPv6s := sets.New[string](), sets.New[string]() + backendPrivateIPv4s, backendPrivateIPv6s := utilsets.NewString(), utilsets.NewString() for _, bp := range *lb.BackendAddressPools { found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { @@ -399,7 +399,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] var ( changed bool numOfAdd, numOfDelete int - activeNodes sets.Set[string] + activeNodes *utilsets.IgnoreCaseSet err error ) if bi.useMultipleStandardLoadBalancers() { @@ -420,9 +420,6 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] return err } } - if activeNodes == nil { - activeNodes = sets.New[string]() - } } lbBackendPoolName := bi.getBackendPoolNameForService(service, clusterName, isIPv6) @@ -437,7 +434,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] backendPool.LoadBalancerBackendAddresses = &lbBackendPoolAddresses } - existingIPs := sets.New[string]() + existingIPs := utilsets.NewString() for _, loadBalancerBackendAddress := range *backendPool.LoadBalancerBackendAddresses { if 
loadBalancerBackendAddress.LoadBalancerBackendAddressPropertiesFormat != nil && loadBalancerBackendAddress.IPAddress != nil { @@ -447,7 +444,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] } var nodeIPsToBeAdded []string - nodePrivateIPsSet := sets.New[string]() + nodePrivateIPsSet := utilsets.NewString() for _, node := range nodes { if isControlPlaneNode(node) { klog.V(4).Infof("bi.EnsureHostsInPool: skipping control plane node %s", node.Name) @@ -458,7 +455,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] nodePrivateIPsSet.Insert(privateIP) if bi.useMultipleStandardLoadBalancers() { - if activeNodes == nil || !activeNodes.Has(node.Name) { + if !activeNodes.Has(node.Name) { klog.V(4).Infof("bi.EnsureHostsInPool: node %s should not be in load balancer %q", node.Name, lbName) continue } @@ -519,7 +516,7 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(slb *net found, isIPv6 := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { klog.V(2).Infof("bi.CleanupVMSetFromBackendPoolByCondition: checking the backend pool %s from standard load balancer %s", pointer.StringDeref(bp.Name, ""), pointer.StringDeref(slb.Name, "")) - vmIPsToBeDeleted := sets.New[string]() + vmIPsToBeDeleted := utilsets.NewString() for _, node := range nodes { vmSetName, err := bi.VMSet.GetNodeVMSetName(node) if err != nil { @@ -662,8 +659,8 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi for _, i := range bpIdxes { bp := newBackendPools[i] var nodeIPAddressesToBeDeleted []string - for nodeName := range bi.excludeLoadBalancerNodes { - for ip := range bi.nodePrivateIPs[strings.ToLower(nodeName)] { + for _, nodeName := range bi.excludeLoadBalancerNodes.UnsortedList() { + for _, ip := range bi.nodePrivateIPs[strings.ToLower(nodeName)].UnsortedList() { klog.V(2).Infof("bi.ReconcileBackendPools for service (%s): found unwanted node private IP %s, decouple it from the LB %s", serviceName, ip, lbName) nodeIPAddressesToBeDeleted = append(nodeIPAddressesToBeDeleted, ip) } @@ -744,7 +741,7 @@ func (bi *backendPoolTypeNodeIP) GetBackendPrivateIPs(clusterName string, servic return nil, nil } - backendPrivateIPv4s, backendPrivateIPv6s := sets.New[string](), sets.New[string]() + backendPrivateIPv4s, backendPrivateIPv6s := utilsets.NewString(), utilsets.NewString() for _, bp := range *lb.BackendAddressPools { found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { @@ -839,7 +836,7 @@ func removeNodeIPAddressesFromBackendPool( removeAll, useMultipleStandardLoadBalancers bool, ) bool { changed := false - nodeIPsSet := sets.New(nodeIPAddresses...) + nodeIPsSet := utilsets.NewString(nodeIPAddresses...) 
if backendPool.BackendAddressPoolPropertiesFormat == nil || backendPool.LoadBalancerBackendAddresses == nil { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go index c34b4738da3b..cfee45118a71 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go @@ -26,8 +26,8 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/utils/pointer" @@ -35,6 +35,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) // DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry @@ -87,7 +88,7 @@ func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterNa return nil, nil } - managedLBNames := sets.New[string](strings.ToLower(clusterName)) + managedLBNames := utilsets.NewString(clusterName) managedLBs := make([]network.LoadBalancer, 0) if strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuBasic) { // return early if wantLb=false @@ -110,13 +111,13 @@ func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterNa } for agentPoolVMSetName := range agentPoolVMSetNamesMap { - managedLBNames.Insert(strings.ToLower(az.mapVMSetNameToLoadBalancerName(agentPoolVMSetName, clusterName))) + managedLBNames.Insert(az.mapVMSetNameToLoadBalancerName(agentPoolVMSetName, clusterName)) } } if az.useMultipleStandardLoadBalancers() { for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { - managedLBNames.Insert(strings.ToLower(multiSLBConfig.Name), fmt.Sprintf("%s%s", strings.ToLower(multiSLBConfig.Name), consts.InternalLoadBalancerNameSuffix)) + managedLBNames.Insert(multiSLBConfig.Name, fmt.Sprintf("%s%s", multiSLBConfig.Name, consts.InternalLoadBalancerNameSuffix)) } } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go index 25ff0e816299..63640d7b4716 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go @@ -38,6 +38,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) // batchProcessor collects operations in a certain interval and then processes them in batches. @@ -329,11 +330,11 @@ func (az *Cloud) setUpEndpointSlicesInformer(informerFactory informers.SharedInf } for _, previousNodeName := range previousNodeNames { nodeIPsSet := az.nodePrivateIPs[strings.ToLower(previousNodeName)] - previousIPs = append(previousIPs, setToStrings(nodeIPsSet)...) + previousIPs = append(previousIPs, nodeIPsSet.UnsortedList()...) 
} for _, currentNodeName := range currentNodeNames { nodeIPsSet := az.nodePrivateIPs[strings.ToLower(currentNodeName)] - currentIPs = append(currentIPs, setToStrings(nodeIPsSet)...) + currentIPs = append(currentIPs, nodeIPsSet.UnsortedList()...) } if az.backendPoolUpdater != nil { @@ -462,7 +463,7 @@ func newServiceInfo(ipFamily, lbName string) *serviceInfo { } // getLocalServiceEndpointsNodeNames gets the node names that host all endpoints of the local service. -func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) (sets.Set[string], error) { +func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) (*utilsets.IgnoreCaseSet, error) { var ( ep *discovery_v1.EndpointSlice foundInCache bool @@ -505,7 +506,7 @@ func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) (sets.Se nodeNames = append(nodeNames, pointer.StringDeref(endpoint.NodeName, "")) } - return sets.New[string](nodeNames...), nil + return utilsets.NewString(nodeNames...), nil } // cleanupLocalServiceBackendPool cleans up the backend pool of @@ -553,11 +554,9 @@ func (az *Cloud) checkAndApplyLocalServiceBackendPoolUpdates(lb network.LoadBala return err } var expectedIPs []string - for nodeName := range endpointsNodeNames { + for _, nodeName := range endpointsNodeNames.UnsortedList() { ips := az.nodePrivateIPs[strings.ToLower(nodeName)] - for ip := range ips { - expectedIPs = append(expectedIPs, ip) - } + expectedIPs = append(expectedIPs, ips.UnsortedList()...) } currentIPsInBackendPools := make(map[string][]string) for _, bp := range *lb.BackendAddressPools { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go index 40daae0d742d..53c08f71325b 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go @@ -95,7 +95,7 @@ func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName any) *gomock.Call { } // DetachDisk mocks base method. 
-func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DetachDisk", ctx, nodeName, diskMap) + ret := m.ctrl.Call(m, "DetachDisk", ctx, nodeName, diskMap, forceDetach) ret0, _ := ret[0].(error) return ret0 } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go index 702435652156..1d1863b2c6c0 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go @@ -38,10 +38,12 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) var ( @@ -501,19 +503,13 @@ func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState str return powerState, err } - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - statuses := *vm.InstanceView.Statuses - for _, status := range statuses { - state := pointer.StringDeref(status.Code, "") - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil - } - } + if vm.InstanceView != nil { + return vmutil.GetVMPowerState(ptr.Deref(vm.Name, ""), vm.InstanceView.Statuses), nil } // vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting. klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) - return vmPowerStateUnknown, nil + return consts.VMPowerStateUnknown, nil } // GetProvisioningStateByNodeName returns the provisioningState for the specified node.
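[Illustrative note, not part of the diff] The azure_standard.go hunk above replaces the inline "PowerState/" prefix scan with the shared vmutil.GetVMPowerState helper. A minimal standalone sketch of that parsing logic follows; the constants and function names are local to the sketch and only assumed to mirror the consts package.

package main

import (
	"fmt"
	"strings"
)

// Assumed to mirror consts.VMPowerStatePrefix / consts.VMPowerStateUnknown.
const (
	vmPowerStatePrefix  = "PowerState/"
	vmPowerStateUnknown = "unknown"
)

// powerStateFromStatusCodes scans instance-view status codes such as
// "ProvisioningState/succeeded" or "PowerState/deallocated" and returns the
// power state behind the "PowerState/" prefix, or "unknown" if none is found.
func powerStateFromStatusCodes(codes []string) string {
	for _, code := range codes {
		if strings.HasPrefix(strings.ToLower(code), strings.ToLower(vmPowerStatePrefix)) {
			return strings.TrimPrefix(code, vmPowerStatePrefix)
		}
	}
	return vmPowerStateUnknown
}

func main() {
	fmt.Println(powerStateFromStatusCodes([]string{"ProvisioningState/succeeded", "PowerState/deallocated"})) // deallocated
	fmt.Println(powerStateFromStatusCodes(nil))                                                               // unknown
}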
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index fc071fd822d9..f6477ff766e7 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -117,6 +117,7 @@ func (az *Cloud) getStorageAccounts(ctx context.Context, accountOptions *Account isTaggedWithSkip(acct) && isHnsPropertyEqual(acct, accountOptions) && isEnableNfsV3PropertyEqual(acct, accountOptions) && + isEnableHTTPSTrafficOnlyEqual(acct, accountOptions) && isAllowBlobPublicAccessEqual(acct, accountOptions) && isRequireInfrastructureEncryptionEqual(acct, accountOptions) && isAllowSharedKeyAccessEqual(acct, accountOptions) && @@ -840,17 +841,16 @@ func isTagsEqual(account storage.Account, accountOptions *AccountOptions) bool { return true } - for k, v := range account.Tags { - var value string - // nil and empty value should be regarded as equal - if v != nil { - value = *v - } - if accountOptions.Tags[k] != value { + if len(account.Tags) < len(accountOptions.Tags) { + return false + } + + // ensure all tags in accountOptions are in account + for k, v := range accountOptions.Tags { + if pointer.StringDeref(account.Tags[k], "") != v { return false } } - return true } @@ -862,6 +862,10 @@ func isEnableNfsV3PropertyEqual(account storage.Account, accountOptions *Account return pointer.BoolDeref(accountOptions.EnableNfsV3, false) == pointer.BoolDeref(account.EnableNfsV3, false) } +func isEnableHTTPSTrafficOnlyEqual(account storage.Account, accountOptions *AccountOptions) bool { + return accountOptions.EnableHTTPSTrafficOnly == pointer.BoolDeref(account.EnableHTTPSTrafficOnly, true) +} + func isPrivateEndpointAsExpected(account storage.Account, accountOptions *AccountOptions) bool { if accountOptions.CreatePrivateEndpoint == nil { // CreatePrivateEndpoint is not set, match current account diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go index 89b74d42d779..541c74b182d8 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go @@ -29,7 +29,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" "k8s.io/utils/pointer" @@ -505,36 +504,6 @@ func IntInSlice(i int, list []int) bool { return false } -func safeAddKeyToStringsSet(set sets.Set[string], key string) sets.Set[string] { - if set != nil { - set.Insert(key) - } else { - set = sets.New[string](key) - } - - return set -} - -func safeRemoveKeyFromStringsSet(set sets.Set[string], key string) (sets.Set[string], bool) { - var has bool - if set != nil { - if set.Has(key) { - has = true - } - set.Delete(key) - } - - return set, has -} - -func setToStrings(set sets.Set[string]) []string { - var res []string - for key := range set { - res = append(res, key) - } - return res -} - func isLocalService(service *v1.Service) bool { return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal } diff --git 
a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go index 0ac8ae19196c..d18135a2eaaf 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go @@ -79,7 +79,7 @@ type VMSet interface { // AttachDisk attaches a disk to vm AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) // DetachDisk detaches a disk from vm - DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error + DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error // WaitForUpdateResult waits for the response of the update request WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go index 69389b263833..3c84f767b578 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go @@ -35,11 +35,13 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" "sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine" + vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) var ( @@ -282,20 +284,14 @@ func (ss *ScaleSet) GetPowerStatusByNodeName(name string) (powerState string, er if vm.IsVirtualMachineScaleSetVM() { v := vm.AsVirtualMachineScaleSetVM() - if v.InstanceView != nil && v.InstanceView.Statuses != nil { - statuses := *v.InstanceView.Statuses - for _, status := range statuses { - state := pointer.StringDeref(status.Code, "") - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil - } - } + if v.InstanceView != nil { + return vmutil.GetVMPowerState(ptr.Deref(v.Name, ""), v.InstanceView.Statuses), nil } } // vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting. klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) - return vmPowerStateUnknown, nil + return consts.VMPowerStateUnknown, nil } // GetProvisioningStateByNodeName returns the provisioningState for the specified node. @@ -1029,6 +1025,8 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { + logger := klog.Background().WithName("EnsureHostInPool"). 
+ WithValues("nodeName", nodeName, "backendPoolID", backendPoolID, "vmSetNameOfLB", vmSetNameOfLB) vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -1037,13 +1035,20 @@ func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, bac return "", "", "", nil, nil } - klog.Errorf("EnsureHostInPool: failed to get VMSS VM %s: %v", vmName, err) + logger.Error(err, "failed to get vmss vm", "vmName", vmName) if !errors.Is(err, ErrorNotVmssInstance) { return "", "", "", nil, err } } + statuses := vm.GetInstanceViewStatus() + vmPowerState := vmutil.GetVMPowerState(vm.Name, statuses) + provisioningState := vm.GetProvisioningState() + if vmutil.IsNotActiveVMState(provisioningState, vmPowerState) { + logger.V(2).Info("skip updating the node because it is not in an active state", "vmName", vmName, "provisioningState", provisioningState, "vmPowerState", vmPowerState) + return "", "", "", nil, nil + } - klog.V(2).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vm.VMSSName, backendPoolID) + logger.V(2).Info("ensuring the vmss node in LB backendpool", "vmss name", vm.VMSSName) // Check scale set name: // - For basic SKU load balancer, return nil if the node's scale set is mismatched with vmSetNameOfLB. @@ -1057,14 +1062,13 @@ func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, bac } if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, vm.VMSSName) { - klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the ScaleSet %s", vmName, vmSetNameOfLB) + logger.V(3).Info("skips the node %s because it is not in the ScaleSet %s", vmName, vmSetNameOfLB) return "", "", "", nil, nil } // Find primary network interface configuration. if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, "+ - "probably because the vm's being deleted", vmName) + logger.V(4).Info("cannot obtain the primary network interface configuration, of the vm, probably because the vm's being deleted", "vmName", vmName) return "", "", "", nil, nil } @@ -1114,7 +1118,7 @@ func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, bac return "", "", "", nil, err } if !isSameLB { - klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + logger.V(4).Info("The node has already been added to an LB, omit adding it to a new one", "lbName", oldLBName) return "", "", "", nil, nil } } @@ -1221,7 +1225,7 @@ func (ss *ScaleSet) ensureVMSSInPool(_ *v1.Service, nodes []*v1.Node, backendPoo // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
- if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName) continue } @@ -1493,6 +1497,7 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted // from the specified node, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName string, backendPoolIDs []string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { + logger := klog.Background().WithName("ensureBackendPoolDeletedFromNode").WithValues("nodeName", nodeName, "backendPoolIDs", backendPoolIDs) vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) if err != nil { if errors.Is(err, cloudprovider.InstanceNotFound) { @@ -1503,6 +1508,14 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName string, backendPoo return "", "", "", nil, err } + statuses := vm.GetInstanceViewStatus() + vmPowerState := vmutil.GetVMPowerState(vm.Name, statuses) + provisioningState := vm.GetProvisioningState() + if vmutil.IsNotActiveVMState(provisioningState, vmPowerState) { + logger.V(2).Info("skip updating the node because it is not in an active state", "provisioningState", provisioningState, "vmPowerState", vmPowerState) + return "", "", "", nil, nil + } + // Find primary network interface configuration. if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { klog.V(4).Infof("ensureBackendPoolDeletedFromNode: cannot obtain the primary network interface configuration, of vm %s, "+ @@ -1679,7 +1692,7 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(backendPoolIDs []str // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("ensureBackendPoolDeletedFromVMSS: found vmss %s being deleted, skipping", pointer.StringDeref(vmss.Name, "")) return true } @@ -2086,7 +2099,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
- if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName) continue } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go index 8cc176333e61..2efd93ac3b09 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go @@ -25,12 +25,12 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" "k8s.io/utils/pointer" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) type VMSSVirtualMachineEntry struct { @@ -48,11 +48,11 @@ type VMSSEntry struct { } type NonVmssUniformNodesEntry struct { - VMSSFlexVMNodeNames sets.Set[string] - VMSSFlexVMProviderIDs sets.Set[string] - AvSetVMNodeNames sets.Set[string] - AvSetVMProviderIDs sets.Set[string] - ClusterNodeNames sets.Set[string] + VMSSFlexVMNodeNames *utilsets.IgnoreCaseSet + VMSSFlexVMProviderIDs *utilsets.IgnoreCaseSet + AvSetVMNodeNames *utilsets.IgnoreCaseSet + AvSetVMProviderIDs *utilsets.IgnoreCaseSet + ClusterNodeNames *utilsets.IgnoreCaseSet } type VMManagementType string @@ -324,10 +324,10 @@ func (ss *ScaleSet) updateCache(nodeName, resourceGroupName, vmssName, instanceI func (ss *ScaleSet) newNonVmssUniformNodesCache() (azcache.Resource, error) { getter := func(key string) (interface{}, error) { - vmssFlexVMNodeNames := sets.New[string]() - vmssFlexVMProviderIDs := sets.New[string]() - avSetVMNodeNames := sets.New[string]() - avSetVMProviderIDs := sets.New[string]() + vmssFlexVMNodeNames := utilsets.NewString() + vmssFlexVMProviderIDs := utilsets.NewString() + avSetVMNodeNames := utilsets.NewString() + avSetVMProviderIDs := utilsets.NewString() resourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err @@ -495,7 +495,17 @@ func (ss *ScaleSet) getVMManagementTypeByIPConfigurationID(ipConfigurationID str vmName := strings.Replace(nicName, "-nic", "", 1) cachedAvSetVMs := cached.(NonVmssUniformNodesEntry).AvSetVMNodeNames + if cachedAvSetVMs.Has(vmName) { + return ManagedByAvSet, nil + } + + // If the node is not in the cache, assume the node has joined after the last cache refresh and attempt to refresh the cache + cached, err = ss.nonVmssUniformNodesCache.Get(consts.NonVmssUniformNodesKey, azcache.CacheReadTypeForceRefresh) + if err != nil { + return ManagedByUnknownVMSet, err + } + cachedAvSetVMs = cached.(NonVmssUniformNodesEntry).AvSetVMNodeNames if cachedAvSetVMs.Has(vmName) { return ManagedByAvSet, nil } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go index 6721dceaba8c..89b5a900f200 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go @@ -39,7 +39,7 @@ func (az 
*Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName str klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr) return rerr } - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName) return nil } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go index ef98ae0eb513..cf12dc73e569 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go @@ -27,16 +27,19 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) var ( @@ -267,19 +270,13 @@ func (fs *FlexScaleSet) GetPowerStatusByNodeName(name string) (powerState string return powerState, err } - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - statuses := *vm.InstanceView.Statuses - for _, status := range statuses { - state := pointer.StringDeref(status.Code, "") - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil - } - } + if vm.InstanceView != nil { + return vmutil.GetVMPowerState(ptr.Deref(vm.Name, ""), vm.InstanceView.Statuses), nil } // vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting. klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) - return vmPowerStateUnknown, nil + return consts.VMPowerStateUnknown, nil } // GetPrimaryInterface gets machine primary network interface by node name. @@ -391,12 +388,12 @@ func (fs *FlexScaleSet) getNodeInformationByIPConfigurationID(ipConfigurationID // get vmName by nic name vmName, err := fs.GetVMNameByIPConfigurationName(nicResourceGroup, nicName) if err != nil { - return "", "", "", fmt.Errorf("failed to get vm name of ip config ID %s", ipConfigurationID) + return "", "", "", fmt.Errorf("failed to get vm name of ip config ID %s: %w", ipConfigurationID, err) } nodeName, err := fs.getNodeNameByVMName(vmName) if err != nil { - return "", "", "", fmt.Errorf("failed to map VM Name to NodeName: VM Name %s", vmName) + return "", "", "", fmt.Errorf("failed to map VM Name to NodeName: VM Name %s: %w", vmName, err) } vmssFlexName, err := fs.getNodeVmssFlexName(nodeName) @@ -616,7 +613,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(_ *v1.Service, nodes []*v1.Node, ba // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
- if vmssFlex.ProvisioningState != nil && strings.EqualFold(*vmssFlex.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmssFlex.ProvisioningState != nil && strings.EqualFold(*vmssFlex.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("ensureVMSSFlexInPool: found vmss %s being deleted, skipping", vmssFlexID) continue } @@ -790,7 +787,7 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[stri // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("fs.EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName) continue } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go index e149bcac29b3..58741f9e4848 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go @@ -17,8 +17,6 @@ limitations under the License. package config import ( - "crypto/rsa" - "crypto/x509" "errors" "fmt" "io" @@ -30,8 +28,6 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "golang.org/x/crypto/pkcs12" - "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" @@ -147,13 +143,13 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment, r resource) } - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + if len(config.AADClientCertPath) > 0 { klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") certData, err := os.ReadFile(config.AADClientCertPath) if err != nil { return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) } - certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) + certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) if err != nil { return nil, fmt.Errorf("decoding the client certificate: %w", err) } @@ -197,8 +193,22 @@ func GetMultiTenantServicePrincipalToken(config *AzureAuthConfig, env *azure.Env env.ServiceManagementEndpoint) } - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting multi-tenant service principal token") + if len(config.AADClientCertPath) > 0 { + klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve multi-tenant access token") + certData, err := os.ReadFile(config.AADClientCertPath) + if err != nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) + } + certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) + if err != nil { + return nil, fmt.Errorf("decoding the client certificate: %w", err) + } + return 
adal.NewMultiTenantServicePrincipalTokenFromCertificate( + multiTenantOAuthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) } return nil, ErrorNoAuth @@ -230,8 +240,22 @@ func GetNetworkResourceServicePrincipalToken(config *AzureAuthConfig, env *azure env.ServiceManagementEndpoint) } - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting network resources service principal token") + if len(config.AADClientCertPath) > 0 { + klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token for network resources tenant") + certData, err := os.ReadFile(config.AADClientCertPath) + if err != nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) + } + certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) + if err != nil { + return nil, fmt.Errorf("decoding the client certificate: %w", err) + } + return adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) } return nil, ErrorNoAuth @@ -303,21 +327,6 @@ func (config *AzureAuthConfig) UsesNetworkResourceInDifferentSubscription() bool return len(config.NetworkResourceSubscriptionID) > 0 && !strings.EqualFold(config.NetworkResourceSubscriptionID, config.SubscriptionID) } -// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and -// the private RSA key -func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - privateKey, certificate, err := pkcs12.Decode(pkcs, password) - if err != nil { - return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %w", err) - } - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key") - } - - return certificate, rsaPrivateKey, nil -} - // azureStackOverrides ensures that the Environment matches what AKSe currently generates for Azure Stack func azureStackOverrides(env *azure.Environment, resourceManagerEndpoint, identitySystem string) { env.ManagementPortalURL = strings.Replace(resourceManagerEndpoint, "https://management.", "https://portal.", -1) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go index 05e393d6dc68..fcdb92c92d08 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go @@ -46,6 +46,7 @@ type AccessControl struct { // immutable pre-compute states. 
SourceRanges []netip.Prefix AllowedIPRanges []netip.Prefix + InvalidRanges []string AllowedServiceTags []string securityRuleDestinationPortsByProtocol map[network.SecurityRuleProtocol][]int32 } @@ -82,20 +83,17 @@ func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...Access logger.Error(err, "Failed to initialize RuleHelper") return nil, err } - sourceRanges, err := SourceRanges(svc) + sourceRanges, invalidSourceRanges, err := SourceRanges(svc) if err != nil && !options.SkipAnnotationValidation { logger.Error(err, "Failed to parse SourceRange configuration") - return nil, err } - allowedIPRanges, err := AllowedIPRanges(svc) + allowedIPRanges, invalidAllowedIPRanges, err := AllowedIPRanges(svc) if err != nil && !options.SkipAnnotationValidation { logger.Error(err, "Failed to parse AllowedIPRanges configuration") - return nil, err } allowedServiceTags, err := AllowedServiceTags(svc) if err != nil && !options.SkipAnnotationValidation { logger.Error(err, "Failed to parse AllowedServiceTags configuration") - return nil, err } securityRuleDestinationPortsByProtocol, err := securityRuleDestinationPortsByProtocol(svc) if err != nil { @@ -114,13 +112,14 @@ func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...Access SourceRanges: sourceRanges, AllowedIPRanges: allowedIPRanges, AllowedServiceTags: allowedServiceTags, + InvalidRanges: append(invalidSourceRanges, invalidAllowedIPRanges...), securityRuleDestinationPortsByProtocol: securityRuleDestinationPortsByProtocol, }, nil } // IsAllowFromInternet returns true if the given service is allowed to be accessed from internet. // To be specific, -// 1. For all types of LB, it returns false if the given service is specified with `service tags` or `not allowed all IP ranges`. +// 1. For all types of LB, it returns false if the given service is specified with `service tags` or `not allowed all IP ranges`, including invalid IP ranges. // 2. For internal LB, it returns true iff the given service is explicitly specified with `allowed all IP ranges`. Refer: https://github.com/kubernetes-sigs/cloud-provider-azure/issues/698 func (ac *AccessControl) IsAllowFromInternet() bool { if len(ac.AllowedServiceTags) > 0 { @@ -132,6 +131,9 @@ func (ac *AccessControl) IsAllowFromInternet() bool { if len(ac.AllowedIPRanges) > 0 && !iputil.IsPrefixesAllowAll(ac.AllowedIPRanges) { return false } + if len(ac.InvalidRanges) > 0 { + return false + } if !IsInternal(ac.svc) { return true } @@ -143,10 +145,11 @@ func (ac *AccessControl) IsAllowFromInternet() bool { // By default, NSG allow traffic from the VNet. func (ac *AccessControl) DenyAllExceptSourceRanges() bool { var ( - annotationEnabled = strings.EqualFold(ac.svc.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges], "true") - sourceRangeSpecified = len(ac.SourceRanges) > 0 || len(ac.AllowedIPRanges) > 0 + annotationEnabled = strings.EqualFold(ac.svc.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges], "true") + sourceRangeSpecified = len(ac.SourceRanges) > 0 || len(ac.AllowedIPRanges) > 0 + invalidRangesSpecified = len(ac.InvalidRanges) > 0 ) - return annotationEnabled && sourceRangeSpecified + return (annotationEnabled && sourceRangeSpecified) || invalidRangesSpecified } // AllowedIPv4Ranges returns the IPv4 ranges that are allowed to access the LoadBalancer. 
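[Illustrative note, not part of the diff] The access-control changes above stop treating unparsable source ranges as fatal: invalid entries are collected in InvalidRanges so the caller can emit a warning event and fall back to a deny-all rule. A small standalone sketch of that parse-and-partition step follows; partitionRanges is a local name and only approximates the masked-CIDR check in iputil.ParsePrefix.

package main

import (
	"fmt"
	"net/netip"
)

// partitionRanges splits CIDR strings into properly masked prefixes and
// everything else. "10.0.0.1/8" is rejected because its address is not the
// masked network address (10.0.0.0/8), matching the stricter ParsePrefix rule.
func partitionRanges(ranges []string) (valid []netip.Prefix, invalid []string) {
	for _, r := range ranges {
		p, err := netip.ParsePrefix(r)
		if err != nil || p.Addr() != p.Masked().Addr() {
			invalid = append(invalid, r)
			continue
		}
		valid = append(valid, p)
	}
	return valid, invalid
}

func main() {
	valid, invalid := partitionRanges([]string{"10.0.0.0/8", "10.0.0.1/8", "not-a-cidr"})
	fmt.Println(valid)   // [10.0.0.0/8]
	fmt.Println(invalid) // [10.0.0.1/8 not-a-cidr]
}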
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go index 436bca4b11c2..04d04d448193 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go @@ -17,6 +17,7 @@ limitations under the License. package loadbalancer import ( + "errors" "fmt" "net/netip" "strings" @@ -45,55 +46,75 @@ func AllowedServiceTags(svc *v1.Service) ([]string, error) { return nil, nil } - return strings.Split(strings.TrimSpace(value), Sep), nil + tags := strings.Split(strings.TrimSpace(value), Sep) + for i := range tags { + tags[i] = strings.TrimSpace(tags[i]) + } + return tags, nil } -// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotation. -func AllowedIPRanges(svc *v1.Service) ([]netip.Prefix, error) { +// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotations: +// service.beta.kubernetes.io/azure-allowed-ip-ranges and service.beta.kubernetes.io/load-balancer-source-ranges +func AllowedIPRanges(svc *v1.Service) ([]netip.Prefix, []string, error) { const ( Sep = "," - Key = consts.ServiceAnnotationAllowedIPRanges + ) + var ( + errs []error + validRanges []netip.Prefix + invalidRanges []string ) - value, found := svc.Annotations[Key] - if !found { - return nil, nil - } + for _, key := range []string{consts.ServiceAnnotationAllowedIPRanges, v1.AnnotationLoadBalancerSourceRangesKey} { + value, found := svc.Annotations[key] + if !found { + continue + } - rv, err := iputil.ParsePrefixes(strings.Split(strings.TrimSpace(value), Sep)) - if err != nil { - return nil, NewErrAnnotationValue(Key, value, err) + var errsByKey []error + for _, p := range strings.Split(strings.TrimSpace(value), Sep) { + p = strings.TrimSpace(p) + prefix, err := iputil.ParsePrefix(p) + if err != nil { + errsByKey = append(errsByKey, err) + invalidRanges = append(invalidRanges, p) + } else { + validRanges = append(validRanges, prefix) + } + } + if len(errsByKey) > 0 { + errs = append(errs, NewErrAnnotationValue(key, value, errors.Join(errsByKey...))) + } } - return rv, nil + if len(errs) > 0 { + return validRanges, invalidRanges, errors.Join(errs...) + } + return validRanges, invalidRanges, nil } -// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges` and standard annotation. -// If `spec.LoadBalancerSourceRanges` is not set, it will try to parse the annotation. -func SourceRanges(svc *v1.Service) ([]netip.Prefix, error) { - // Read from spec - if len(svc.Spec.LoadBalancerSourceRanges) > 0 { - rv, err := iputil.ParsePrefixes(svc.Spec.LoadBalancerSourceRanges) +// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges`. 
+func SourceRanges(svc *v1.Service) ([]netip.Prefix, []string, error) { + var ( + errs []error + validRanges []netip.Prefix + invalidRanges []string + ) + + for _, p := range svc.Spec.LoadBalancerSourceRanges { + p = strings.TrimSpace(p) + prefix, err := iputil.ParsePrefix(p) if err != nil { - return nil, fmt.Errorf("invalid service.Spec.LoadBalancerSourceRanges [%v]: %w", svc.Spec.LoadBalancerSourceRanges, err) + errs = append(errs, err) + invalidRanges = append(invalidRanges, p) + } else { + validRanges = append(validRanges, prefix) } - return rv, nil } - - // Read from annotation - const ( - Sep = "," - Key = v1.AnnotationLoadBalancerSourceRangesKey - ) - value, found := svc.Annotations[Key] - if !found { - return nil, nil - } - rv, err := iputil.ParsePrefixes(strings.Split(strings.TrimSpace(value), Sep)) - if err != nil { - return nil, NewErrAnnotationValue(Key, value, err) + if len(errs) > 0 { + return validRanges, invalidRanges, fmt.Errorf("invalid service.Spec.LoadBalancerSourceRanges [%v]: %w", svc.Spec.LoadBalancerSourceRanges, errors.Join(errs...)) } - return rv, nil + return validRanges, invalidRanges, nil } func AdditionalPublicIPs(svc *v1.Service) ([]netip.Addr, error) { diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go index a5644bfbcd55..fdde2d86e870 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go @@ -30,14 +30,14 @@ func IsPrefixesAllowAll(prefixes []netip.Prefix) bool { return false } -func ParsePrefixes(vs []string) ([]netip.Prefix, error) { - var rv []netip.Prefix - for _, v := range vs { - prefix, err := netip.ParsePrefix(v) - if err != nil { - return nil, fmt.Errorf("invalid CIDR `%s`: %w", v, err) - } - rv = append(rv, prefix) +func ParsePrefix(v string) (netip.Prefix, error) { + prefix, err := netip.ParsePrefix(v) + if err != nil { + return netip.Prefix{}, fmt.Errorf("invalid CIDR `%s`: %w", v, err) + } + masked := prefix.Masked() + if prefix.Addr().Compare(masked.Addr()) != 0 { + return netip.Prefix{}, fmt.Errorf("invalid CIDR `%s`: not a valid network prefix, should be properly masked like %s", v, masked) } - return rv, nil + return prefix, nil } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go index 82b617e2594f..659944f989df 100644 --- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go +++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go @@ -313,10 +313,11 @@ func (helper *RuleHelper) RemoveDestinationPrefixesFromRules(prefixes []string) } for _, rule := range helper.rules { + if rule.DestinationAddressPrefix != nil && index[*rule.DestinationAddressPrefix] { + rule.DestinationAddressPrefix = nil + continue + } if rule.DestinationAddressPrefixes == nil { - if rule.DestinationAddressPrefix != nil && index[*rule.DestinationAddressPrefix] { - rule.DestinationAddressPrefix = nil - } continue } diff --git 
a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go
index bd6b77535be3..924f08c8eb29 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go
@@ -18,7 +18,10 @@ package virtualmachine
 
 import (
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
+	"k8s.io/utils/pointer"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
 )
 
 type Variant string
 
@@ -145,6 +148,36 @@ func (vm *VirtualMachine) AsVirtualMachineScaleSetVM() *compute.VirtualMachineScaleSetVM {
 	return vm.vmssVM
 }
 
+func (vm *VirtualMachine) GetInstanceViewStatus() *[]compute.InstanceViewStatus {
+	if vm.IsVirtualMachine() && vm.vm != nil &&
+		vm.vm.VirtualMachineProperties != nil &&
+		vm.vm.VirtualMachineProperties.InstanceView != nil {
+		return vm.vm.VirtualMachineProperties.InstanceView.Statuses
+	}
+	if vm.IsVirtualMachineScaleSetVM() &&
+		vm.vmssVM != nil &&
+		vm.vmssVM.VirtualMachineScaleSetVMProperties != nil &&
+		vm.vmssVM.VirtualMachineScaleSetVMProperties.InstanceView != nil {
+		return vm.vmssVM.VirtualMachineScaleSetVMProperties.InstanceView.Statuses
+	}
+	return nil
+}
+
+func (vm *VirtualMachine) GetProvisioningState() string {
+	if vm.IsVirtualMachine() && vm.vm != nil &&
+		vm.vm.VirtualMachineProperties != nil &&
+		vm.vm.VirtualMachineProperties.ProvisioningState != nil {
+		return *vm.vm.VirtualMachineProperties.ProvisioningState
+	}
+	if vm.IsVirtualMachineScaleSetVM() &&
+		vm.vmssVM != nil &&
+		vm.vmssVM.VirtualMachineScaleSetVMProperties != nil &&
+		vm.vmssVM.VirtualMachineScaleSetVMProperties.ProvisioningState != nil {
+		return *vm.vmssVM.VirtualMachineScaleSetVMProperties.ProvisioningState
+	}
+	return consts.ProvisioningStateUnknown
+}
+
 // StringMap returns a map of strings built from the map of string pointers. The empty string is
 // used for nil pointers.
 func stringMap(msp map[string]*string) map[string]string {
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go
index 81691fc89f9d..923e33de0dca 100644
--- a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go
@@ -149,7 +149,7 @@ func GetError(resp *http.Response, err error) *Error {
 		return nil
 	}
 
-	if err == nil && resp != nil && isSuccessHTTPResponse(resp) {
+	if err == nil && resp != nil && IsSuccessHTTPResponse(resp) {
 		// HTTP 2xx suggests a successful response
 		return nil
 	}
@@ -166,8 +166,8 @@
 	}
 }
 
-// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
-func isSuccessHTTPResponse(resp *http.Response) bool {
+// IsSuccessHTTPResponse determines if the response from an HTTP request suggests success
+func IsSuccessHTTPResponse(resp *http.Response) bool {
 	if resp == nil {
 		return false
 	}
@@ -219,7 +219,7 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
 	}
 
 	// should retry on <200, error>.
-	if isSuccessHTTPResponse(resp) && err != nil {
+	if IsSuccessHTTPResponse(resp) && err != nil {
 		return true
 	}
 
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go
new file mode 100644
index 000000000000..2562fcf6ea69
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// IgnoreCaseSet is a set of strings that is case-insensitive.
+type IgnoreCaseSet struct {
+	set sets.Set[string]
+}
+
+// NewString creates a new IgnoreCaseSet with the given items.
+func NewString(items ...string) *IgnoreCaseSet {
+	var lowerItems []string
+	for _, item := range items {
+		lowerItems = append(lowerItems, strings.ToLower(item))
+	}
+	set := sets.New[string](lowerItems...)
+	return &IgnoreCaseSet{set: set}
+}
+
+// Insert adds the given items to the set. It only works if the set is initialized.
+func (s *IgnoreCaseSet) Insert(items ...string) {
+	var lowerItems []string
+	for _, item := range items {
+		lowerItems = append(lowerItems, strings.ToLower(item))
+	}
+	for _, item := range lowerItems {
+		s.set.Insert(item)
+	}
+}
+
+// SafeInsert creates a new IgnoreCaseSet with the given items if the set is not initialized.
+// This is the recommended way to insert elements into the set.
+func SafeInsert(s *IgnoreCaseSet, items ...string) *IgnoreCaseSet {
+	if s.Initialized() {
+		s.Insert(items...)
+		return s
+	}
+	return NewString(items...)
+}
+
+// Delete removes the given item from the set.
+// It will be a no-op if the set is not initialized or the item is not in the set.
+func (s *IgnoreCaseSet) Delete(item string) bool {
+	var has bool
+	item = strings.ToLower(item)
+	if s.Initialized() && s.Has(item) {
+		s.set.Delete(item)
+		has = true
+	}
+	return has
+}
+
+// Has returns true if the given item is in the set, and the set is initialized.
+func (s *IgnoreCaseSet) Has(item string) bool {
+	if !s.Initialized() {
+		return false
+	}
+	return s.set.Has(strings.ToLower(item))
+}
+
+// Initialized returns true if the set is initialized.
+func (s *IgnoreCaseSet) Initialized() bool {
+	return s != nil && s.set != nil
+}
+
+// UnsortedList returns the items in the set in an arbitrary order.
+func (s *IgnoreCaseSet) UnsortedList() []string {
+	if !s.Initialized() {
+		return []string{}
+	}
+	return s.set.UnsortedList()
+}
+
+// Len returns the number of items in the set.
+func (s *IgnoreCaseSet) Len() int {
+	if !s.Initialized() {
+		return 0
+	}
+	return s.set.Len()
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go
new file mode 100644
index 000000000000..10510ecc7900
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package stringutils
+
+import "strings"
+
+// HasPrefixCaseInsensitive returns true if the string has the prefix, case-insensitive.
+func HasPrefixCaseInsensitive(s string, prefix string) bool {
+	return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
+}
diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go
new file mode 100644
index 000000000000..a5084f30b944
--- /dev/null
+++ b/cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vm
+
+import (
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
+
+	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
+
+	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	stringutils "sigs.k8s.io/cloud-provider-azure/pkg/util/string"
+)
+
+// GetVMPowerState returns the power state of the VM
+func GetVMPowerState(vmName string, vmStatuses *[]compute.InstanceViewStatus) string {
+	logger := klog.Background().WithName("getVMSSVMPowerState").WithValues("vmName", vmName)
+	if vmStatuses != nil {
+		for _, status := range *vmStatuses {
+			state := ptr.Deref(status.Code, "")
+			if stringutils.HasPrefixCaseInsensitive(state, consts.VMPowerStatePrefix) {
+				return strings.TrimPrefix(state, consts.VMPowerStatePrefix)
+			}
+		}
+	}
+	logger.V(3).Info("vm status is nil in the instance view or there is no power state in the status")
+	return consts.VMPowerStateUnknown
+}
+
+// IsNotActiveVMState checks if the VM is in the active states
+func IsNotActiveVMState(provisioningState, powerState string) bool {
+	return strings.EqualFold(provisioningState, consts.ProvisioningStateDeleting) ||
+		strings.EqualFold(provisioningState, consts.ProvisioningStateUnknown) ||
+		strings.EqualFold(powerState, consts.VMPowerStateUnknown) ||
+		strings.EqualFold(powerState, consts.VMPowerStateStopped) ||
+		strings.EqualFold(powerState, consts.VMPowerStateStopping) ||
+		strings.EqualFold(powerState, consts.VMPowerStateDeallocated) ||
+		strings.EqualFold(powerState, consts.VMPowerStateDeallocating)
+}
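The IgnoreCaseSet vendored above lowercases items on insertion and lookup, so membership checks ignore case, and SafeInsert also covers a nil or uninitialized set. A minimal usage sketch, not part of this PR; the tag names are invented for illustration:

```go
package main

import (
	"fmt"

	azsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets"
)

func main() {
	// Items are lowercased when stored, so lookups ignore case.
	tags := azsets.NewString("Environment", "Owner")
	fmt.Println(tags.Has("ENVIRONMENT"), tags.Len()) // true 2

	// SafeInsert is the recommended insertion path: it also handles a nil set.
	var s *azsets.IgnoreCaseSet
	s = azsets.SafeInsert(s, "CostCenter")
	fmt.Println(s.Has("costcenter")) // true
}
```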
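GetVMPowerState extracts the power state from a VM's instance-view statuses, and IsNotActiveVMState flags deleting, unknown, stopped, stopping, deallocated, and deallocating VMs. A minimal sketch of how the two compose, not part of this PR and written under the assumption that consts.VMPowerStatePrefix is the conventional "PowerState/" status-code prefix and that the deallocated state matches consts.VMPowerStateDeallocated; the VM name and statuses are invented:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
	"k8s.io/utils/ptr"

	vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm"
)

func main() {
	// Instance-view statuses as the Azure API reports them; the "PowerState/..."
	// code carries the power state (assumed prefix value, see note above).
	statuses := []compute.InstanceViewStatus{
		{Code: ptr.To("ProvisioningState/succeeded")},
		{Code: ptr.To("PowerState/deallocated")},
	}

	powerState := vmutil.GetVMPowerState("example-vm", &statuses)
	fmt.Println(powerState) // expected: "deallocated"

	// A deallocated VM should be reported as not active, even though
	// its provisioning state is Succeeded.
	fmt.Println(vmutil.IsNotActiveVMState("Succeeded", powerState)) // expected: true
}
```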