diff --git a/CHANGELOG.md b/CHANGELOG.md index 5da91978eaa7..331e90f58851 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # CHANGELOG +## `v39.3.0` + +### New Packages + +| Package Name | API Version | +| -----------: | :---------: | +| containerservice | 2019-10-27-preview | +| synapse | 2019-06-01-preview | + ## `v39.2.0` ### New Packages diff --git a/Gopkg.lock b/Gopkg.lock index e33a2b23f376..5c4beec85736 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,7 +2,7 @@ [[projects]] - digest = "1:46eaa8d8d35d2004f218ad6c48b6c81e0b1c3457915f55a40831b5af3fb43b76" + digest = "1:0f5cec94ea46ab37f6b95e52a4b70761ada57b809059aba2d332ec0813416920" name = "github.com/Azure/go-autorest" packages = [ "autorest", @@ -17,8 +17,8 @@ "tracing", ] pruneopts = "UT" - revision = "20a15b4e99645bc30a1cf8de518a6e8ac331c864" - version = "v13.3.2" + revision = "327ff6ce8b902f9afe648d5ea8ff6dc26c26f150" + version = "v13.4.0" [[projects]] digest = "1:f313598719f69ec4edbcd75b0f2ef1c8ed9e00f6ff3d7583ac3966db2d70d326" @@ -86,12 +86,12 @@ version = "v0.2.0" [[projects]] - digest = "1:15b5cc79aad436d47019f814fde81a10221c740dc8ddf769221a65097fb6c2e9" + digest = "1:7218fd69ff5436d016101bbc6183cdc289aa45ac37b48e78846318e4ef389bea" name = "github.com/kr/text" packages = ["."] pruneopts = "UT" - revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f" - version = "v0.1.0" + revision = "702c74938df48b97370179f33ce2107bd7ff3b3e" + version = "v0.2.0" [[projects]] digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" @@ -150,11 +150,22 @@ "pkcs12/internal/rc2", ] pruneopts = "UT" - revision = "69ecbb4d6d5dab05e49161c6e77ea40a030884e1" + revision = "1d94cc7ab1c630336ab82ccb9c9cda72a875c382" + +[[projects]] + digest = "1:467bb8fb8fa786448b8d486cd0bb7c1a5577dcd7310441aa02a20110cd9f727d" + name = "golang.org/x/mod" + packages = [ + "module", + "semver", + ] + pruneopts = "UT" + revision = "ed3ec21bb8e252814c380df79a80f366440ddb2d" + version = "v0.2.0" [[projects]] branch = "master" - digest = "1:22bcbe2280ed955f8207e503921c3999bd63de4b07daf41a5306e34e492e4f57" + digest = "1:d85ff5b2e5b485021f821f10030cdf46f723c3c84c9482835a4c9c462a7edfba" name = "golang.org/x/tools" packages = [ "go/ast/astutil", @@ -162,11 +173,20 @@ "internal/fastwalk", "internal/gopathwalk", "internal/imports", - "internal/module", - "internal/semver", ] pruneopts = "UT" - revision = "2f3ba24bd6e75104fb11be4edf062de340ffd1ab" + revision = "5fb17a1e7b9b41efa62ea3f2c593156af7609897" + +[[projects]] + branch = "master" + digest = "1:918a46e4a2fb83df33f668f5a6bd51b2996775d073fce1800d3ec01b0a5ddd2b" + name = "golang.org/x/xerrors" + packages = [ + ".", + "internal", + ] + pruneopts = "UT" + revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544" [[projects]] branch = "v1" diff --git a/Gopkg.toml b/Gopkg.toml index d6f2e6cc7899..bba7ad883b27 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -25,7 +25,7 @@ [[constraint]] name = "github.com/Azure/go-autorest" - version = "13.3.2" + version = "13.4.0" [[constraint]] branch = "master" diff --git a/profiles/preview/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go b/profiles/preview/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go index dcfba68016fb..7138fe5424cb 100644 --- a/profiles/preview/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go +++ b/profiles/preview/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go @@ -19,7 +19,7 @@ package containerserviceapi -import 
original "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-09-30-preview/containerservice/containerserviceapi" +import original "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerserviceapi" type AgentPoolsClientAPI = original.AgentPoolsClientAPI type ContainerServicesClientAPI = original.ContainerServicesClientAPI diff --git a/profiles/preview/preview/containerservice/mgmt/containerservice/models.go b/profiles/preview/preview/containerservice/mgmt/containerservice/models.go index 5a69cee93d8c..659d0b240828 100644 --- a/profiles/preview/preview/containerservice/mgmt/containerservice/models.go +++ b/profiles/preview/preview/containerservice/mgmt/containerservice/models.go @@ -22,7 +22,7 @@ package containerservice import ( "context" - original "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-09-30-preview/containerservice" + original "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice" ) const ( @@ -126,6 +126,13 @@ const ( Swarm OrchestratorTypes = original.Swarm ) +type OutboundType = original.OutboundType + +const ( + LoadBalancer OutboundType = original.LoadBalancer + UserDefinedRouting OutboundType = original.UserDefinedRouting +) + type ResourceIdentityType = original.ResourceIdentityType const ( @@ -370,6 +377,7 @@ type ManagedClusterAADProfile = original.ManagedClusterAADProfile type ManagedClusterAPIServerAccessProfile = original.ManagedClusterAPIServerAccessProfile type ManagedClusterAccessProfile = original.ManagedClusterAccessProfile type ManagedClusterAddonProfile = original.ManagedClusterAddonProfile +type ManagedClusterAddonProfileIdentity = original.ManagedClusterAddonProfileIdentity type ManagedClusterAgentPoolProfile = original.ManagedClusterAgentPoolProfile type ManagedClusterAgentPoolProfileProperties = original.ManagedClusterAgentPoolProfileProperties type ManagedClusterIdentity = original.ManagedClusterIdentity @@ -383,6 +391,7 @@ type ManagedClusterLoadBalancerProfileOutboundIPs = original.ManagedClusterLoadB type ManagedClusterPoolUpgradeProfile = original.ManagedClusterPoolUpgradeProfile type ManagedClusterPoolUpgradeProfileUpgradesItem = original.ManagedClusterPoolUpgradeProfileUpgradesItem type ManagedClusterProperties = original.ManagedClusterProperties +type ManagedClusterPropertiesIdentityProfileValue = original.ManagedClusterPropertiesIdentityProfileValue type ManagedClusterServicePrincipalProfile = original.ManagedClusterServicePrincipalProfile type ManagedClusterUpgradeProfile = original.ManagedClusterUpgradeProfile type ManagedClusterUpgradeProfileProperties = original.ManagedClusterUpgradeProfileProperties @@ -397,6 +406,7 @@ type ManagedClustersUpdateTagsFuture = original.ManagedClustersUpdateTagsFuture type MasterProfile = original.MasterProfile type NetworkProfile = original.NetworkProfile type NetworkProfileType = original.NetworkProfileType +type OpenShiftAPIProperties = original.OpenShiftAPIProperties type OpenShiftManagedCluster = original.OpenShiftManagedCluster type OpenShiftManagedClusterAADIdentityProvider = original.OpenShiftManagedClusterAADIdentityProvider type OpenShiftManagedClusterAgentPoolProfile = original.OpenShiftManagedClusterAgentPoolProfile @@ -432,6 +442,7 @@ type SSHPublicKey = original.SSHPublicKey type ServicePrincipalProfile = original.ServicePrincipalProfile type SubResource = original.SubResource type TagsObject 
= original.TagsObject +type UserAssignedIdentity = original.UserAssignedIdentity type VMDiagnostics = original.VMDiagnostics type WindowsProfile = original.WindowsProfile @@ -522,6 +533,9 @@ func PossibleOpenShiftContainerServiceVMSizeValues() []OpenShiftContainerService func PossibleOrchestratorTypesValues() []OrchestratorTypes { return original.PossibleOrchestratorTypesValues() } +func PossibleOutboundTypeValues() []OutboundType { + return original.PossibleOutboundTypeValues() +} func PossibleResourceIdentityTypeValues() []ResourceIdentityType { return original.PossibleResourceIdentityTypeValues() } diff --git a/profiles/preview/preview/synapse/mgmt/synapse/models.go b/profiles/preview/preview/synapse/mgmt/synapse/models.go new file mode 100644 index 000000000000..0422d26066d5 --- /dev/null +++ b/profiles/preview/preview/synapse/mgmt/synapse/models.go @@ -0,0 +1,753 @@ +// +build go1.9 + +// Copyright 2020 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package synapse + +import ( + "context" + + original "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse" +) + +const ( + DefaultBaseURI = original.DefaultBaseURI +) + +type ActualState = original.ActualState + +const ( + Disabled ActualState = original.Disabled + Disabling ActualState = original.Disabling + Enabled ActualState = original.Enabled + Enabling ActualState = original.Enabling + Unknown ActualState = original.Unknown +) + +type BlobAuditingPolicyState = original.BlobAuditingPolicyState + +const ( + BlobAuditingPolicyStateDisabled BlobAuditingPolicyState = original.BlobAuditingPolicyStateDisabled + BlobAuditingPolicyStateEnabled BlobAuditingPolicyState = original.BlobAuditingPolicyStateEnabled +) + +type ColumnDataType = original.ColumnDataType + +const ( + Bigint ColumnDataType = original.Bigint + Binary ColumnDataType = original.Binary + Bit ColumnDataType = original.Bit + Char ColumnDataType = original.Char + Date ColumnDataType = original.Date + Datetime ColumnDataType = original.Datetime + Datetime2 ColumnDataType = original.Datetime2 + Datetimeoffset ColumnDataType = original.Datetimeoffset + Decimal ColumnDataType = original.Decimal + Float ColumnDataType = original.Float + Geography ColumnDataType = original.Geography + Geometry ColumnDataType = original.Geometry + Hierarchyid ColumnDataType = original.Hierarchyid + Image ColumnDataType = original.Image + Int ColumnDataType = original.Int + Money ColumnDataType = original.Money + Nchar ColumnDataType = original.Nchar + Ntext ColumnDataType = original.Ntext + Numeric ColumnDataType = original.Numeric + Nvarchar ColumnDataType = original.Nvarchar + Real ColumnDataType = original.Real + Smalldatetime ColumnDataType = original.Smalldatetime + Smallint ColumnDataType = original.Smallint + Smallmoney ColumnDataType = original.Smallmoney + SQLVariant ColumnDataType = original.SQLVariant + 
Sysname ColumnDataType = original.Sysname + Text ColumnDataType = original.Text + Time ColumnDataType = original.Time + Timestamp ColumnDataType = original.Timestamp + Tinyint ColumnDataType = original.Tinyint + Uniqueidentifier ColumnDataType = original.Uniqueidentifier + Varbinary ColumnDataType = original.Varbinary + Varchar ColumnDataType = original.Varchar + XML ColumnDataType = original.XML +) + +type DesiredState = original.DesiredState + +const ( + DesiredStateDisabled DesiredState = original.DesiredStateDisabled + DesiredStateEnabled DesiredState = original.DesiredStateEnabled +) + +type GeoBackupPolicyState = original.GeoBackupPolicyState + +const ( + GeoBackupPolicyStateDisabled GeoBackupPolicyState = original.GeoBackupPolicyStateDisabled + GeoBackupPolicyStateEnabled GeoBackupPolicyState = original.GeoBackupPolicyStateEnabled +) + +type ManagementOperationState = original.ManagementOperationState + +const ( + CancelInProgress ManagementOperationState = original.CancelInProgress + Cancelled ManagementOperationState = original.Cancelled + Failed ManagementOperationState = original.Failed + InProgress ManagementOperationState = original.InProgress + Pending ManagementOperationState = original.Pending + Succeeded ManagementOperationState = original.Succeeded +) + +type NodeSize = original.NodeSize + +const ( + Large NodeSize = original.Large + Medium NodeSize = original.Medium + None NodeSize = original.None + Small NodeSize = original.Small +) + +type NodeSizeFamily = original.NodeSizeFamily + +const ( + NodeSizeFamilyMemoryOptimized NodeSizeFamily = original.NodeSizeFamilyMemoryOptimized + NodeSizeFamilyNone NodeSizeFamily = original.NodeSizeFamilyNone +) + +type OperationStatus = original.OperationStatus + +const ( + OperationStatusCanceled OperationStatus = original.OperationStatusCanceled + OperationStatusFailed OperationStatus = original.OperationStatusFailed + OperationStatusInProgress OperationStatus = original.OperationStatusInProgress + OperationStatusSucceeded OperationStatus = original.OperationStatusSucceeded +) + +type ProvisioningState = original.ProvisioningState + +const ( + ProvisioningStateDeleteError ProvisioningState = original.ProvisioningStateDeleteError + ProvisioningStateDeleting ProvisioningState = original.ProvisioningStateDeleting + ProvisioningStateFailed ProvisioningState = original.ProvisioningStateFailed + ProvisioningStateProvisioning ProvisioningState = original.ProvisioningStateProvisioning + ProvisioningStateSucceeded ProvisioningState = original.ProvisioningStateSucceeded +) + +type QueryAggregationFunction = original.QueryAggregationFunction + +const ( + Avg QueryAggregationFunction = original.Avg + Max QueryAggregationFunction = original.Max + Min QueryAggregationFunction = original.Min + Sum QueryAggregationFunction = original.Sum +) + +type QueryExecutionType = original.QueryExecutionType + +const ( + Aborted QueryExecutionType = original.Aborted + Any QueryExecutionType = original.Any + Exception QueryExecutionType = original.Exception + Irregular QueryExecutionType = original.Irregular + Regular QueryExecutionType = original.Regular +) + +type QueryMetricUnit = original.QueryMetricUnit + +const ( + KB QueryMetricUnit = original.KB + Microseconds QueryMetricUnit = original.Microseconds + Percentage QueryMetricUnit = original.Percentage +) + +type QueryObservedMetricType = original.QueryObservedMetricType + +const ( + CPU QueryObservedMetricType = original.CPU + Duration QueryObservedMetricType = original.Duration + ExecutionCount 
QueryObservedMetricType = original.ExecutionCount + Io QueryObservedMetricType = original.Io + Logio QueryObservedMetricType = original.Logio +) + +type ReplicationRole = original.ReplicationRole + +const ( + Copy ReplicationRole = original.Copy + NonReadableSecondary ReplicationRole = original.NonReadableSecondary + Primary ReplicationRole = original.Primary + Secondary ReplicationRole = original.Secondary + Source ReplicationRole = original.Source +) + +type ReplicationState = original.ReplicationState + +const ( + CATCHUP ReplicationState = original.CATCHUP + PENDING ReplicationState = original.PENDING + SEEDING ReplicationState = original.SEEDING + SUSPENDED ReplicationState = original.SUSPENDED +) + +type ResourceIdentityType = original.ResourceIdentityType + +const ( + ResourceIdentityTypeNone ResourceIdentityType = original.ResourceIdentityTypeNone + ResourceIdentityTypeSystemAssigned ResourceIdentityType = original.ResourceIdentityTypeSystemAssigned +) + +type RestorePointType = original.RestorePointType + +const ( + CONTINUOUS RestorePointType = original.CONTINUOUS + DISCRETE RestorePointType = original.DISCRETE +) + +type SecurityAlertPolicyState = original.SecurityAlertPolicyState + +const ( + SecurityAlertPolicyStateDisabled SecurityAlertPolicyState = original.SecurityAlertPolicyStateDisabled + SecurityAlertPolicyStateEnabled SecurityAlertPolicyState = original.SecurityAlertPolicyStateEnabled + SecurityAlertPolicyStateNew SecurityAlertPolicyState = original.SecurityAlertPolicyStateNew +) + +type TransparentDataEncryptionStatus = original.TransparentDataEncryptionStatus + +const ( + TransparentDataEncryptionStatusDisabled TransparentDataEncryptionStatus = original.TransparentDataEncryptionStatusDisabled + TransparentDataEncryptionStatusEnabled TransparentDataEncryptionStatus = original.TransparentDataEncryptionStatusEnabled +) + +type VulnerabilityAssessmentPolicyBaselineName = original.VulnerabilityAssessmentPolicyBaselineName + +const ( + Default VulnerabilityAssessmentPolicyBaselineName = original.Default + Master VulnerabilityAssessmentPolicyBaselineName = original.Master +) + +type VulnerabilityAssessmentScanState = original.VulnerabilityAssessmentScanState + +const ( + VulnerabilityAssessmentScanStateFailed VulnerabilityAssessmentScanState = original.VulnerabilityAssessmentScanStateFailed + VulnerabilityAssessmentScanStateFailedToRun VulnerabilityAssessmentScanState = original.VulnerabilityAssessmentScanStateFailedToRun + VulnerabilityAssessmentScanStateInProgress VulnerabilityAssessmentScanState = original.VulnerabilityAssessmentScanStateInProgress + VulnerabilityAssessmentScanStatePassed VulnerabilityAssessmentScanState = original.VulnerabilityAssessmentScanStatePassed +) + +type VulnerabilityAssessmentScanTriggerType = original.VulnerabilityAssessmentScanTriggerType + +const ( + OnDemand VulnerabilityAssessmentScanTriggerType = original.OnDemand + Recurring VulnerabilityAssessmentScanTriggerType = original.Recurring +) + +type AadAdminProperties = original.AadAdminProperties +type AutoPauseProperties = original.AutoPauseProperties +type AutoScaleProperties = original.AutoScaleProperties +type AvailableRpOperation = original.AvailableRpOperation +type AvailableRpOperationDisplayInfo = original.AvailableRpOperationDisplayInfo +type AzureEntityResource = original.AzureEntityResource +type BaseClient = original.BaseClient +type BigDataPoolPatchInfo = original.BigDataPoolPatchInfo +type BigDataPoolResourceInfo = original.BigDataPoolResourceInfo +type 
BigDataPoolResourceInfoListResult = original.BigDataPoolResourceInfoListResult +type BigDataPoolResourceInfoListResultIterator = original.BigDataPoolResourceInfoListResultIterator +type BigDataPoolResourceInfoListResultPage = original.BigDataPoolResourceInfoListResultPage +type BigDataPoolResourceProperties = original.BigDataPoolResourceProperties +type BigDataPoolsClient = original.BigDataPoolsClient +type BigDataPoolsCreateOrUpdateFuture = original.BigDataPoolsCreateOrUpdateFuture +type BigDataPoolsDeleteFuture = original.BigDataPoolsDeleteFuture +type CheckNameAvailabilityRequest = original.CheckNameAvailabilityRequest +type CheckNameAvailabilityResponse = original.CheckNameAvailabilityResponse +type CreateSQLPoolRestorePointDefinition = original.CreateSQLPoolRestorePointDefinition +type DataLakeStorageAccountDetails = original.DataLakeStorageAccountDetails +type DataWarehouseUserActivities = original.DataWarehouseUserActivities +type DataWarehouseUserActivitiesProperties = original.DataWarehouseUserActivitiesProperties +type ErrorAdditionalInfo = original.ErrorAdditionalInfo +type ErrorContract = original.ErrorContract +type ErrorDetail = original.ErrorDetail +type ErrorResponse = original.ErrorResponse +type GeoBackupPolicy = original.GeoBackupPolicy +type GeoBackupPolicyProperties = original.GeoBackupPolicyProperties +type IPFirewallRuleInfo = original.IPFirewallRuleInfo +type IPFirewallRuleInfoListResult = original.IPFirewallRuleInfoListResult +type IPFirewallRuleInfoListResultIterator = original.IPFirewallRuleInfoListResultIterator +type IPFirewallRuleInfoListResultPage = original.IPFirewallRuleInfoListResultPage +type IPFirewallRuleProperties = original.IPFirewallRuleProperties +type IPFirewallRulesClient = original.IPFirewallRulesClient +type IPFirewallRulesCreateOrUpdateFuture = original.IPFirewallRulesCreateOrUpdateFuture +type IPFirewallRulesDeleteFuture = original.IPFirewallRulesDeleteFuture +type IPFirewallRulesReplaceAllFuture = original.IPFirewallRulesReplaceAllFuture +type LibraryRequirements = original.LibraryRequirements +type ListAvailableRpOperation = original.ListAvailableRpOperation +type ManagedIdentity = original.ManagedIdentity +type ManagedIdentitySQLControlSettingsModel = original.ManagedIdentitySQLControlSettingsModel +type ManagedIdentitySQLControlSettingsModelProperties = original.ManagedIdentitySQLControlSettingsModelProperties +type ManagedIdentitySQLControlSettingsModelPropertiesGrantSQLControlToManagedIdentity = original.ManagedIdentitySQLControlSettingsModelPropertiesGrantSQLControlToManagedIdentity +type MetadataSyncConfig = original.MetadataSyncConfig +type MetadataSyncConfigProperties = original.MetadataSyncConfigProperties +type OperationMetaLogSpecification = original.OperationMetaLogSpecification +type OperationMetaMetricDimensionSpecification = original.OperationMetaMetricDimensionSpecification +type OperationMetaMetricSpecification = original.OperationMetaMetricSpecification +type OperationMetaPropertyInfo = original.OperationMetaPropertyInfo +type OperationMetaServiceSpecification = original.OperationMetaServiceSpecification +type OperationResource = original.OperationResource +type OperationsClient = original.OperationsClient +type ProxyResource = original.ProxyResource +type QueryInterval = original.QueryInterval +type QueryMetric = original.QueryMetric +type QueryStatistic = original.QueryStatistic +type ReplaceAllFirewallRulesOperationResponse = original.ReplaceAllFirewallRulesOperationResponse +type ReplaceAllIPFirewallRulesRequest = 
original.ReplaceAllIPFirewallRulesRequest +type ReplicationLink = original.ReplicationLink +type ReplicationLinkListResult = original.ReplicationLinkListResult +type ReplicationLinkListResultIterator = original.ReplicationLinkListResultIterator +type ReplicationLinkListResultPage = original.ReplicationLinkListResultPage +type ReplicationLinkProperties = original.ReplicationLinkProperties +type Resource = original.Resource +type ResourceMoveDefinition = original.ResourceMoveDefinition +type RestorePoint = original.RestorePoint +type RestorePointListResult = original.RestorePointListResult +type RestorePointListResultIterator = original.RestorePointListResultIterator +type RestorePointListResultPage = original.RestorePointListResultPage +type RestorePointProperties = original.RestorePointProperties +type SQLPool = original.SQLPool +type SQLPoolBlobAuditingPoliciesClient = original.SQLPoolBlobAuditingPoliciesClient +type SQLPoolBlobAuditingPolicy = original.SQLPoolBlobAuditingPolicy +type SQLPoolBlobAuditingPolicyProperties = original.SQLPoolBlobAuditingPolicyProperties +type SQLPoolBlobAuditingPolicySQLPoolOperationListResult = original.SQLPoolBlobAuditingPolicySQLPoolOperationListResult +type SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator = original.SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator +type SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage = original.SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage +type SQLPoolColumn = original.SQLPoolColumn +type SQLPoolColumnListResult = original.SQLPoolColumnListResult +type SQLPoolColumnListResultIterator = original.SQLPoolColumnListResultIterator +type SQLPoolColumnListResultPage = original.SQLPoolColumnListResultPage +type SQLPoolColumnProperties = original.SQLPoolColumnProperties +type SQLPoolConnectionPoliciesClient = original.SQLPoolConnectionPoliciesClient +type SQLPoolConnectionPolicy = original.SQLPoolConnectionPolicy +type SQLPoolConnectionPolicyProperties = original.SQLPoolConnectionPolicyProperties +type SQLPoolDataWarehouseUserActivitiesClient = original.SQLPoolDataWarehouseUserActivitiesClient +type SQLPoolGeoBackupPoliciesClient = original.SQLPoolGeoBackupPoliciesClient +type SQLPoolInfoListResult = original.SQLPoolInfoListResult +type SQLPoolInfoListResultIterator = original.SQLPoolInfoListResultIterator +type SQLPoolInfoListResultPage = original.SQLPoolInfoListResultPage +type SQLPoolMetadataSyncConfigsClient = original.SQLPoolMetadataSyncConfigsClient +type SQLPoolOperation = original.SQLPoolOperation +type SQLPoolOperationProperties = original.SQLPoolOperationProperties +type SQLPoolOperationResultsClient = original.SQLPoolOperationResultsClient +type SQLPoolOperationsClient = original.SQLPoolOperationsClient +type SQLPoolPatchInfo = original.SQLPoolPatchInfo +type SQLPoolReplicationLinksClient = original.SQLPoolReplicationLinksClient +type SQLPoolResourceProperties = original.SQLPoolResourceProperties +type SQLPoolRestorePointsClient = original.SQLPoolRestorePointsClient +type SQLPoolRestorePointsCreateFuture = original.SQLPoolRestorePointsCreateFuture +type SQLPoolSchema = original.SQLPoolSchema +type SQLPoolSchemaListResult = original.SQLPoolSchemaListResult +type SQLPoolSchemaListResultIterator = original.SQLPoolSchemaListResultIterator +type SQLPoolSchemaListResultPage = original.SQLPoolSchemaListResultPage +type SQLPoolSchemasClient = original.SQLPoolSchemasClient +type SQLPoolSecurityAlertPoliciesClient = original.SQLPoolSecurityAlertPoliciesClient +type 
SQLPoolSecurityAlertPolicy = original.SQLPoolSecurityAlertPolicy +type SQLPoolSensitivityLabelsClient = original.SQLPoolSensitivityLabelsClient +type SQLPoolTable = original.SQLPoolTable +type SQLPoolTableColumnsClient = original.SQLPoolTableColumnsClient +type SQLPoolTableListResult = original.SQLPoolTableListResult +type SQLPoolTableListResultIterator = original.SQLPoolTableListResultIterator +type SQLPoolTableListResultPage = original.SQLPoolTableListResultPage +type SQLPoolTablesClient = original.SQLPoolTablesClient +type SQLPoolTransparentDataEncryptionsClient = original.SQLPoolTransparentDataEncryptionsClient +type SQLPoolUsage = original.SQLPoolUsage +type SQLPoolUsageListResult = original.SQLPoolUsageListResult +type SQLPoolUsageListResultIterator = original.SQLPoolUsageListResultIterator +type SQLPoolUsageListResultPage = original.SQLPoolUsageListResultPage +type SQLPoolUsagesClient = original.SQLPoolUsagesClient +type SQLPoolVulnerabilityAssessment = original.SQLPoolVulnerabilityAssessment +type SQLPoolVulnerabilityAssessmentListResult = original.SQLPoolVulnerabilityAssessmentListResult +type SQLPoolVulnerabilityAssessmentListResultIterator = original.SQLPoolVulnerabilityAssessmentListResultIterator +type SQLPoolVulnerabilityAssessmentListResultPage = original.SQLPoolVulnerabilityAssessmentListResultPage +type SQLPoolVulnerabilityAssessmentProperties = original.SQLPoolVulnerabilityAssessmentProperties +type SQLPoolVulnerabilityAssessmentRuleBaseline = original.SQLPoolVulnerabilityAssessmentRuleBaseline +type SQLPoolVulnerabilityAssessmentRuleBaselineItem = original.SQLPoolVulnerabilityAssessmentRuleBaselineItem +type SQLPoolVulnerabilityAssessmentRuleBaselineProperties = original.SQLPoolVulnerabilityAssessmentRuleBaselineProperties +type SQLPoolVulnerabilityAssessmentRuleBaselinesClient = original.SQLPoolVulnerabilityAssessmentRuleBaselinesClient +type SQLPoolVulnerabilityAssessmentScanExportProperties = original.SQLPoolVulnerabilityAssessmentScanExportProperties +type SQLPoolVulnerabilityAssessmentScansClient = original.SQLPoolVulnerabilityAssessmentScansClient +type SQLPoolVulnerabilityAssessmentScansExport = original.SQLPoolVulnerabilityAssessmentScansExport +type SQLPoolVulnerabilityAssessmentScansInitiateScanFuture = original.SQLPoolVulnerabilityAssessmentScansInitiateScanFuture +type SQLPoolVulnerabilityAssessmentsClient = original.SQLPoolVulnerabilityAssessmentsClient +type SQLPoolsClient = original.SQLPoolsClient +type SQLPoolsCreateFuture = original.SQLPoolsCreateFuture +type SQLPoolsDeleteFuture = original.SQLPoolsDeleteFuture +type SQLPoolsPauseFuture = original.SQLPoolsPauseFuture +type SQLPoolsResumeFuture = original.SQLPoolsResumeFuture +type SecurityAlertPolicyProperties = original.SecurityAlertPolicyProperties +type SensitivityLabel = original.SensitivityLabel +type SensitivityLabelListResult = original.SensitivityLabelListResult +type SensitivityLabelListResultIterator = original.SensitivityLabelListResultIterator +type SensitivityLabelListResultPage = original.SensitivityLabelListResultPage +type SensitivityLabelProperties = original.SensitivityLabelProperties +type SetObject = original.SetObject +type Sku = original.Sku +type TopQueries = original.TopQueries +type TopQueriesListResult = original.TopQueriesListResult +type TrackedResource = original.TrackedResource +type TransparentDataEncryption = original.TransparentDataEncryption +type TransparentDataEncryptionProperties = original.TransparentDataEncryptionProperties +type VirtualNetworkProfile = 
original.VirtualNetworkProfile +type VulnerabilityAssessmentRecurringScansProperties = original.VulnerabilityAssessmentRecurringScansProperties +type VulnerabilityAssessmentScanError = original.VulnerabilityAssessmentScanError +type VulnerabilityAssessmentScanRecord = original.VulnerabilityAssessmentScanRecord +type VulnerabilityAssessmentScanRecordListResult = original.VulnerabilityAssessmentScanRecordListResult +type VulnerabilityAssessmentScanRecordListResultIterator = original.VulnerabilityAssessmentScanRecordListResultIterator +type VulnerabilityAssessmentScanRecordListResultPage = original.VulnerabilityAssessmentScanRecordListResultPage +type VulnerabilityAssessmentScanRecordProperties = original.VulnerabilityAssessmentScanRecordProperties +type Workspace = original.Workspace +type WorkspaceAadAdminInfo = original.WorkspaceAadAdminInfo +type WorkspaceAadAdminsClient = original.WorkspaceAadAdminsClient +type WorkspaceAadAdminsCreateOrUpdateFuture = original.WorkspaceAadAdminsCreateOrUpdateFuture +type WorkspaceAadAdminsDeleteFuture = original.WorkspaceAadAdminsDeleteFuture +type WorkspaceInfoListResult = original.WorkspaceInfoListResult +type WorkspaceInfoListResultIterator = original.WorkspaceInfoListResultIterator +type WorkspaceInfoListResultPage = original.WorkspaceInfoListResultPage +type WorkspaceManagedIdentitySQLControlSettingsClient = original.WorkspaceManagedIdentitySQLControlSettingsClient +type WorkspacePatchInfo = original.WorkspacePatchInfo +type WorkspacePatchProperties = original.WorkspacePatchProperties +type WorkspaceProperties = original.WorkspaceProperties +type WorkspacesClient = original.WorkspacesClient +type WorkspacesCreateOrUpdateFuture = original.WorkspacesCreateOrUpdateFuture +type WorkspacesDeleteFuture = original.WorkspacesDeleteFuture +type WorkspacesUpdateFuture = original.WorkspacesUpdateFuture + +func New(subscriptionID string) BaseClient { + return original.New(subscriptionID) +} +func NewBigDataPoolResourceInfoListResultIterator(page BigDataPoolResourceInfoListResultPage) BigDataPoolResourceInfoListResultIterator { + return original.NewBigDataPoolResourceInfoListResultIterator(page) +} +func NewBigDataPoolResourceInfoListResultPage(getNextPage func(context.Context, BigDataPoolResourceInfoListResult) (BigDataPoolResourceInfoListResult, error)) BigDataPoolResourceInfoListResultPage { + return original.NewBigDataPoolResourceInfoListResultPage(getNextPage) +} +func NewBigDataPoolsClient(subscriptionID string) BigDataPoolsClient { + return original.NewBigDataPoolsClient(subscriptionID) +} +func NewBigDataPoolsClientWithBaseURI(baseURI string, subscriptionID string) BigDataPoolsClient { + return original.NewBigDataPoolsClientWithBaseURI(baseURI, subscriptionID) +} +func NewIPFirewallRuleInfoListResultIterator(page IPFirewallRuleInfoListResultPage) IPFirewallRuleInfoListResultIterator { + return original.NewIPFirewallRuleInfoListResultIterator(page) +} +func NewIPFirewallRuleInfoListResultPage(getNextPage func(context.Context, IPFirewallRuleInfoListResult) (IPFirewallRuleInfoListResult, error)) IPFirewallRuleInfoListResultPage { + return original.NewIPFirewallRuleInfoListResultPage(getNextPage) +} +func NewIPFirewallRulesClient(subscriptionID string) IPFirewallRulesClient { + return original.NewIPFirewallRulesClient(subscriptionID) +} +func NewIPFirewallRulesClientWithBaseURI(baseURI string, subscriptionID string) IPFirewallRulesClient { + return original.NewIPFirewallRulesClientWithBaseURI(baseURI, subscriptionID) +} +func 
NewOperationsClient(subscriptionID string) OperationsClient { + return original.NewOperationsClient(subscriptionID) +} +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID) +} +func NewReplicationLinkListResultIterator(page ReplicationLinkListResultPage) ReplicationLinkListResultIterator { + return original.NewReplicationLinkListResultIterator(page) +} +func NewReplicationLinkListResultPage(getNextPage func(context.Context, ReplicationLinkListResult) (ReplicationLinkListResult, error)) ReplicationLinkListResultPage { + return original.NewReplicationLinkListResultPage(getNextPage) +} +func NewRestorePointListResultIterator(page RestorePointListResultPage) RestorePointListResultIterator { + return original.NewRestorePointListResultIterator(page) +} +func NewRestorePointListResultPage(getNextPage func(context.Context, RestorePointListResult) (RestorePointListResult, error)) RestorePointListResultPage { + return original.NewRestorePointListResultPage(getNextPage) +} +func NewSQLPoolBlobAuditingPoliciesClient(subscriptionID string) SQLPoolBlobAuditingPoliciesClient { + return original.NewSQLPoolBlobAuditingPoliciesClient(subscriptionID) +} +func NewSQLPoolBlobAuditingPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolBlobAuditingPoliciesClient { + return original.NewSQLPoolBlobAuditingPoliciesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator(page SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator { + return original.NewSQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator(page) +} +func NewSQLPoolBlobAuditingPolicySQLPoolOperationListResultPage(getNextPage func(context.Context, SQLPoolBlobAuditingPolicySQLPoolOperationListResult) (SQLPoolBlobAuditingPolicySQLPoolOperationListResult, error)) SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage { + return original.NewSQLPoolBlobAuditingPolicySQLPoolOperationListResultPage(getNextPage) +} +func NewSQLPoolColumnListResultIterator(page SQLPoolColumnListResultPage) SQLPoolColumnListResultIterator { + return original.NewSQLPoolColumnListResultIterator(page) +} +func NewSQLPoolColumnListResultPage(getNextPage func(context.Context, SQLPoolColumnListResult) (SQLPoolColumnListResult, error)) SQLPoolColumnListResultPage { + return original.NewSQLPoolColumnListResultPage(getNextPage) +} +func NewSQLPoolConnectionPoliciesClient(subscriptionID string) SQLPoolConnectionPoliciesClient { + return original.NewSQLPoolConnectionPoliciesClient(subscriptionID) +} +func NewSQLPoolConnectionPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolConnectionPoliciesClient { + return original.NewSQLPoolConnectionPoliciesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolDataWarehouseUserActivitiesClient(subscriptionID string) SQLPoolDataWarehouseUserActivitiesClient { + return original.NewSQLPoolDataWarehouseUserActivitiesClient(subscriptionID) +} +func NewSQLPoolDataWarehouseUserActivitiesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolDataWarehouseUserActivitiesClient { + return original.NewSQLPoolDataWarehouseUserActivitiesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolGeoBackupPoliciesClient(subscriptionID string) SQLPoolGeoBackupPoliciesClient { + return original.NewSQLPoolGeoBackupPoliciesClient(subscriptionID) +} +func 
NewSQLPoolGeoBackupPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolGeoBackupPoliciesClient { + return original.NewSQLPoolGeoBackupPoliciesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolInfoListResultIterator(page SQLPoolInfoListResultPage) SQLPoolInfoListResultIterator { + return original.NewSQLPoolInfoListResultIterator(page) +} +func NewSQLPoolInfoListResultPage(getNextPage func(context.Context, SQLPoolInfoListResult) (SQLPoolInfoListResult, error)) SQLPoolInfoListResultPage { + return original.NewSQLPoolInfoListResultPage(getNextPage) +} +func NewSQLPoolMetadataSyncConfigsClient(subscriptionID string) SQLPoolMetadataSyncConfigsClient { + return original.NewSQLPoolMetadataSyncConfigsClient(subscriptionID) +} +func NewSQLPoolMetadataSyncConfigsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolMetadataSyncConfigsClient { + return original.NewSQLPoolMetadataSyncConfigsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolOperationResultsClient(subscriptionID string) SQLPoolOperationResultsClient { + return original.NewSQLPoolOperationResultsClient(subscriptionID) +} +func NewSQLPoolOperationResultsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolOperationResultsClient { + return original.NewSQLPoolOperationResultsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolOperationsClient(subscriptionID string) SQLPoolOperationsClient { + return original.NewSQLPoolOperationsClient(subscriptionID) +} +func NewSQLPoolOperationsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolOperationsClient { + return original.NewSQLPoolOperationsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolReplicationLinksClient(subscriptionID string) SQLPoolReplicationLinksClient { + return original.NewSQLPoolReplicationLinksClient(subscriptionID) +} +func NewSQLPoolReplicationLinksClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolReplicationLinksClient { + return original.NewSQLPoolReplicationLinksClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolRestorePointsClient(subscriptionID string) SQLPoolRestorePointsClient { + return original.NewSQLPoolRestorePointsClient(subscriptionID) +} +func NewSQLPoolRestorePointsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolRestorePointsClient { + return original.NewSQLPoolRestorePointsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolSchemaListResultIterator(page SQLPoolSchemaListResultPage) SQLPoolSchemaListResultIterator { + return original.NewSQLPoolSchemaListResultIterator(page) +} +func NewSQLPoolSchemaListResultPage(getNextPage func(context.Context, SQLPoolSchemaListResult) (SQLPoolSchemaListResult, error)) SQLPoolSchemaListResultPage { + return original.NewSQLPoolSchemaListResultPage(getNextPage) +} +func NewSQLPoolSchemasClient(subscriptionID string) SQLPoolSchemasClient { + return original.NewSQLPoolSchemasClient(subscriptionID) +} +func NewSQLPoolSchemasClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolSchemasClient { + return original.NewSQLPoolSchemasClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolSecurityAlertPoliciesClient(subscriptionID string) SQLPoolSecurityAlertPoliciesClient { + return original.NewSQLPoolSecurityAlertPoliciesClient(subscriptionID) +} +func NewSQLPoolSecurityAlertPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolSecurityAlertPoliciesClient { + return original.NewSQLPoolSecurityAlertPoliciesClientWithBaseURI(baseURI, subscriptionID) +} +func 
NewSQLPoolSensitivityLabelsClient(subscriptionID string) SQLPoolSensitivityLabelsClient { + return original.NewSQLPoolSensitivityLabelsClient(subscriptionID) +} +func NewSQLPoolSensitivityLabelsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolSensitivityLabelsClient { + return original.NewSQLPoolSensitivityLabelsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolTableColumnsClient(subscriptionID string) SQLPoolTableColumnsClient { + return original.NewSQLPoolTableColumnsClient(subscriptionID) +} +func NewSQLPoolTableColumnsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolTableColumnsClient { + return original.NewSQLPoolTableColumnsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolTableListResultIterator(page SQLPoolTableListResultPage) SQLPoolTableListResultIterator { + return original.NewSQLPoolTableListResultIterator(page) +} +func NewSQLPoolTableListResultPage(getNextPage func(context.Context, SQLPoolTableListResult) (SQLPoolTableListResult, error)) SQLPoolTableListResultPage { + return original.NewSQLPoolTableListResultPage(getNextPage) +} +func NewSQLPoolTablesClient(subscriptionID string) SQLPoolTablesClient { + return original.NewSQLPoolTablesClient(subscriptionID) +} +func NewSQLPoolTablesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolTablesClient { + return original.NewSQLPoolTablesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolTransparentDataEncryptionsClient(subscriptionID string) SQLPoolTransparentDataEncryptionsClient { + return original.NewSQLPoolTransparentDataEncryptionsClient(subscriptionID) +} +func NewSQLPoolTransparentDataEncryptionsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolTransparentDataEncryptionsClient { + return original.NewSQLPoolTransparentDataEncryptionsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolUsageListResultIterator(page SQLPoolUsageListResultPage) SQLPoolUsageListResultIterator { + return original.NewSQLPoolUsageListResultIterator(page) +} +func NewSQLPoolUsageListResultPage(getNextPage func(context.Context, SQLPoolUsageListResult) (SQLPoolUsageListResult, error)) SQLPoolUsageListResultPage { + return original.NewSQLPoolUsageListResultPage(getNextPage) +} +func NewSQLPoolUsagesClient(subscriptionID string) SQLPoolUsagesClient { + return original.NewSQLPoolUsagesClient(subscriptionID) +} +func NewSQLPoolUsagesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolUsagesClient { + return original.NewSQLPoolUsagesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolVulnerabilityAssessmentListResultIterator(page SQLPoolVulnerabilityAssessmentListResultPage) SQLPoolVulnerabilityAssessmentListResultIterator { + return original.NewSQLPoolVulnerabilityAssessmentListResultIterator(page) +} +func NewSQLPoolVulnerabilityAssessmentListResultPage(getNextPage func(context.Context, SQLPoolVulnerabilityAssessmentListResult) (SQLPoolVulnerabilityAssessmentListResult, error)) SQLPoolVulnerabilityAssessmentListResultPage { + return original.NewSQLPoolVulnerabilityAssessmentListResultPage(getNextPage) +} +func NewSQLPoolVulnerabilityAssessmentRuleBaselinesClient(subscriptionID string) SQLPoolVulnerabilityAssessmentRuleBaselinesClient { + return original.NewSQLPoolVulnerabilityAssessmentRuleBaselinesClient(subscriptionID) +} +func NewSQLPoolVulnerabilityAssessmentRuleBaselinesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolVulnerabilityAssessmentRuleBaselinesClient { + return 
original.NewSQLPoolVulnerabilityAssessmentRuleBaselinesClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolVulnerabilityAssessmentScansClient(subscriptionID string) SQLPoolVulnerabilityAssessmentScansClient { + return original.NewSQLPoolVulnerabilityAssessmentScansClient(subscriptionID) +} +func NewSQLPoolVulnerabilityAssessmentScansClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolVulnerabilityAssessmentScansClient { + return original.NewSQLPoolVulnerabilityAssessmentScansClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolVulnerabilityAssessmentsClient(subscriptionID string) SQLPoolVulnerabilityAssessmentsClient { + return original.NewSQLPoolVulnerabilityAssessmentsClient(subscriptionID) +} +func NewSQLPoolVulnerabilityAssessmentsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolVulnerabilityAssessmentsClient { + return original.NewSQLPoolVulnerabilityAssessmentsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSQLPoolsClient(subscriptionID string) SQLPoolsClient { + return original.NewSQLPoolsClient(subscriptionID) +} +func NewSQLPoolsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolsClient { + return original.NewSQLPoolsClientWithBaseURI(baseURI, subscriptionID) +} +func NewSensitivityLabelListResultIterator(page SensitivityLabelListResultPage) SensitivityLabelListResultIterator { + return original.NewSensitivityLabelListResultIterator(page) +} +func NewSensitivityLabelListResultPage(getNextPage func(context.Context, SensitivityLabelListResult) (SensitivityLabelListResult, error)) SensitivityLabelListResultPage { + return original.NewSensitivityLabelListResultPage(getNextPage) +} +func NewVulnerabilityAssessmentScanRecordListResultIterator(page VulnerabilityAssessmentScanRecordListResultPage) VulnerabilityAssessmentScanRecordListResultIterator { + return original.NewVulnerabilityAssessmentScanRecordListResultIterator(page) +} +func NewVulnerabilityAssessmentScanRecordListResultPage(getNextPage func(context.Context, VulnerabilityAssessmentScanRecordListResult) (VulnerabilityAssessmentScanRecordListResult, error)) VulnerabilityAssessmentScanRecordListResultPage { + return original.NewVulnerabilityAssessmentScanRecordListResultPage(getNextPage) +} +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return original.NewWithBaseURI(baseURI, subscriptionID) +} +func NewWorkspaceAadAdminsClient(subscriptionID string) WorkspaceAadAdminsClient { + return original.NewWorkspaceAadAdminsClient(subscriptionID) +} +func NewWorkspaceAadAdminsClientWithBaseURI(baseURI string, subscriptionID string) WorkspaceAadAdminsClient { + return original.NewWorkspaceAadAdminsClientWithBaseURI(baseURI, subscriptionID) +} +func NewWorkspaceInfoListResultIterator(page WorkspaceInfoListResultPage) WorkspaceInfoListResultIterator { + return original.NewWorkspaceInfoListResultIterator(page) +} +func NewWorkspaceInfoListResultPage(getNextPage func(context.Context, WorkspaceInfoListResult) (WorkspaceInfoListResult, error)) WorkspaceInfoListResultPage { + return original.NewWorkspaceInfoListResultPage(getNextPage) +} +func NewWorkspaceManagedIdentitySQLControlSettingsClient(subscriptionID string) WorkspaceManagedIdentitySQLControlSettingsClient { + return original.NewWorkspaceManagedIdentitySQLControlSettingsClient(subscriptionID) +} +func NewWorkspaceManagedIdentitySQLControlSettingsClientWithBaseURI(baseURI string, subscriptionID string) WorkspaceManagedIdentitySQLControlSettingsClient { + return 
original.NewWorkspaceManagedIdentitySQLControlSettingsClientWithBaseURI(baseURI, subscriptionID) +} +func NewWorkspacesClient(subscriptionID string) WorkspacesClient { + return original.NewWorkspacesClient(subscriptionID) +} +func NewWorkspacesClientWithBaseURI(baseURI string, subscriptionID string) WorkspacesClient { + return original.NewWorkspacesClientWithBaseURI(baseURI, subscriptionID) +} +func PossibleActualStateValues() []ActualState { + return original.PossibleActualStateValues() +} +func PossibleBlobAuditingPolicyStateValues() []BlobAuditingPolicyState { + return original.PossibleBlobAuditingPolicyStateValues() +} +func PossibleColumnDataTypeValues() []ColumnDataType { + return original.PossibleColumnDataTypeValues() +} +func PossibleDesiredStateValues() []DesiredState { + return original.PossibleDesiredStateValues() +} +func PossibleGeoBackupPolicyStateValues() []GeoBackupPolicyState { + return original.PossibleGeoBackupPolicyStateValues() +} +func PossibleManagementOperationStateValues() []ManagementOperationState { + return original.PossibleManagementOperationStateValues() +} +func PossibleNodeSizeFamilyValues() []NodeSizeFamily { + return original.PossibleNodeSizeFamilyValues() +} +func PossibleNodeSizeValues() []NodeSize { + return original.PossibleNodeSizeValues() +} +func PossibleOperationStatusValues() []OperationStatus { + return original.PossibleOperationStatusValues() +} +func PossibleProvisioningStateValues() []ProvisioningState { + return original.PossibleProvisioningStateValues() +} +func PossibleQueryAggregationFunctionValues() []QueryAggregationFunction { + return original.PossibleQueryAggregationFunctionValues() +} +func PossibleQueryExecutionTypeValues() []QueryExecutionType { + return original.PossibleQueryExecutionTypeValues() +} +func PossibleQueryMetricUnitValues() []QueryMetricUnit { + return original.PossibleQueryMetricUnitValues() +} +func PossibleQueryObservedMetricTypeValues() []QueryObservedMetricType { + return original.PossibleQueryObservedMetricTypeValues() +} +func PossibleReplicationRoleValues() []ReplicationRole { + return original.PossibleReplicationRoleValues() +} +func PossibleReplicationStateValues() []ReplicationState { + return original.PossibleReplicationStateValues() +} +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return original.PossibleResourceIdentityTypeValues() +} +func PossibleRestorePointTypeValues() []RestorePointType { + return original.PossibleRestorePointTypeValues() +} +func PossibleSecurityAlertPolicyStateValues() []SecurityAlertPolicyState { + return original.PossibleSecurityAlertPolicyStateValues() +} +func PossibleTransparentDataEncryptionStatusValues() []TransparentDataEncryptionStatus { + return original.PossibleTransparentDataEncryptionStatusValues() +} +func PossibleVulnerabilityAssessmentPolicyBaselineNameValues() []VulnerabilityAssessmentPolicyBaselineName { + return original.PossibleVulnerabilityAssessmentPolicyBaselineNameValues() +} +func PossibleVulnerabilityAssessmentScanStateValues() []VulnerabilityAssessmentScanState { + return original.PossibleVulnerabilityAssessmentScanStateValues() +} +func PossibleVulnerabilityAssessmentScanTriggerTypeValues() []VulnerabilityAssessmentScanTriggerType { + return original.PossibleVulnerabilityAssessmentScanTriggerTypeValues() +} +func UserAgent() string { + return original.UserAgent() + " profiles/preview" +} +func Version() string { + return original.Version() +} diff --git a/profiles/preview/preview/synapse/mgmt/synapse/synapseapi/models.go 
b/profiles/preview/preview/synapse/mgmt/synapse/synapseapi/models.go new file mode 100644 index 000000000000..b3725d718230 --- /dev/null +++ b/profiles/preview/preview/synapse/mgmt/synapse/synapseapi/models.go @@ -0,0 +1,49 @@ +// +build go1.9 + +// Copyright 2020 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package synapseapi + +import original "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse/synapseapi" + +type BigDataPoolsClientAPI = original.BigDataPoolsClientAPI +type IPFirewallRulesClientAPI = original.IPFirewallRulesClientAPI +type OperationsClientAPI = original.OperationsClientAPI +type SQLPoolBlobAuditingPoliciesClientAPI = original.SQLPoolBlobAuditingPoliciesClientAPI +type SQLPoolConnectionPoliciesClientAPI = original.SQLPoolConnectionPoliciesClientAPI +type SQLPoolDataWarehouseUserActivitiesClientAPI = original.SQLPoolDataWarehouseUserActivitiesClientAPI +type SQLPoolGeoBackupPoliciesClientAPI = original.SQLPoolGeoBackupPoliciesClientAPI +type SQLPoolMetadataSyncConfigsClientAPI = original.SQLPoolMetadataSyncConfigsClientAPI +type SQLPoolOperationResultsClientAPI = original.SQLPoolOperationResultsClientAPI +type SQLPoolOperationsClientAPI = original.SQLPoolOperationsClientAPI +type SQLPoolReplicationLinksClientAPI = original.SQLPoolReplicationLinksClientAPI +type SQLPoolRestorePointsClientAPI = original.SQLPoolRestorePointsClientAPI +type SQLPoolSchemasClientAPI = original.SQLPoolSchemasClientAPI +type SQLPoolSecurityAlertPoliciesClientAPI = original.SQLPoolSecurityAlertPoliciesClientAPI +type SQLPoolSensitivityLabelsClientAPI = original.SQLPoolSensitivityLabelsClientAPI +type SQLPoolTableColumnsClientAPI = original.SQLPoolTableColumnsClientAPI +type SQLPoolTablesClientAPI = original.SQLPoolTablesClientAPI +type SQLPoolTransparentDataEncryptionsClientAPI = original.SQLPoolTransparentDataEncryptionsClientAPI +type SQLPoolUsagesClientAPI = original.SQLPoolUsagesClientAPI +type SQLPoolVulnerabilityAssessmentRuleBaselinesClientAPI = original.SQLPoolVulnerabilityAssessmentRuleBaselinesClientAPI +type SQLPoolVulnerabilityAssessmentScansClientAPI = original.SQLPoolVulnerabilityAssessmentScansClientAPI +type SQLPoolVulnerabilityAssessmentsClientAPI = original.SQLPoolVulnerabilityAssessmentsClientAPI +type SQLPoolsClientAPI = original.SQLPoolsClientAPI +type WorkspaceAadAdminsClientAPI = original.WorkspaceAadAdminsClientAPI +type WorkspaceManagedIdentitySQLControlSettingsClientAPI = original.WorkspaceManagedIdentitySQLControlSettingsClientAPI +type WorkspacesClientAPI = original.WorkspacesClientAPI diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/agentpools.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/agentpools.go new file mode 100644 index 000000000000..54f029262777 --- /dev/null +++ 
b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/agentpools.go @@ -0,0 +1,616 @@ +package containerservice + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AgentPoolsClient is the Container Service Client. +type AgentPoolsClient struct { + BaseClient +} + +// NewAgentPoolsClient creates an instance of the AgentPoolsClient client. +func NewAgentPoolsClient(subscriptionID string) AgentPoolsClient { + return NewAgentPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAgentPoolsClientWithBaseURI creates an instance of the AgentPoolsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewAgentPoolsClientWithBaseURI(baseURI string, subscriptionID string) AgentPoolsClient { + return AgentPoolsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an agent pool in the specified managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// agentPoolName - the name of the agent pool. +// parameters - parameters supplied to the Create or Update an agent pool operation.
+func (client AgentPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool) (result AgentPoolsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, + {Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("containerservice.AgentPoolsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, agentPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client AgentPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agentPoolName": autorest.Encode("path", agentPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the +// http.Response Body if it receives an error. +func (client AgentPoolsClient) CreateOrUpdateSender(req *http.Request) (future AgentPoolsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client AgentPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result AgentPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the agent pool in the specified managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// agentPoolName - the name of the agent pool. +func (client AgentPoolsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.AgentPoolsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName, agentPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
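CreateOrUpdate returns an AgentPoolsCreateOrUpdateFuture rather than the finished resource, so the caller drives the long-running operation. A minimal sketch of that flow, assuming credentials come from go-autorest's environment-based authorizer; the subscription ID, resource names, and VM size are placeholders, not values from this change:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	ctx := context.Background()

	// Placeholder subscription ID; the client targets the public cloud by default.
	client := containerservice.NewAgentPoolsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// Count must be between 1 and 100 to pass the client-side validation above.
	future, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myManagedCluster", "nodepool2",
		containerservice.AgentPool{
			ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
				Count:  to.Int32Ptr(3),
				VMSize: containerservice.VMSizeTypes("Standard_DS2_v2"),
			},
		})
	if err != nil {
		panic(err)
	}

	// Block until the operation completes, then pull the resulting agent pool.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	pool, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	if pool.Name != nil {
		fmt.Println("created agent pool:", *pool.Name)
	}
}
```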
+func (client AgentPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agentPoolName": autorest.Encode("path", agentPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AgentPoolsClient) DeleteSender(req *http.Request) (future AgentPoolsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AgentPoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the details of the agent pool by managed cluster and resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// agentPoolName - the name of the agent pool. 
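Delete follows the same future-based pattern through AgentPoolsDeleteFuture; callers usually wait on it before treating the pool as removed. A short sketch with placeholder resource names and an already authorized client:

```go
package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// deleteAgentPool removes one agent pool and waits for the long-running operation.
func deleteAgentPool(ctx context.Context, client containerservice.AgentPoolsClient) error {
	future, err := client.Delete(ctx, "myResourceGroup", "myManagedCluster", "nodepool2")
	if err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err = future.Result(client)
	return err
}
```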
+func (client AgentPoolsClient) Get(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPool, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.AgentPoolsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, resourceName, agentPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AgentPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agentPoolName": autorest.Encode("path", agentPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AgentPoolsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client AgentPoolsClient) GetResponder(resp *http.Response) (result AgentPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAvailableAgentPoolVersions gets a list of supported versions for the specified agent pool. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +func (client AgentPoolsClient) GetAvailableAgentPoolVersions(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolAvailableVersions, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.GetAvailableAgentPoolVersions") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", err.Error()) + } + + req, err := client.GetAvailableAgentPoolVersionsPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", nil, "Failure preparing request") + return + } + + resp, err := client.GetAvailableAgentPoolVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", resp, "Failure sending request") + return + } + + result, err = client.GetAvailableAgentPoolVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", resp, "Failure responding to request") + } + + return +} + +// GetAvailableAgentPoolVersionsPreparer prepares the GetAvailableAgentPoolVersions request. 
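Get is synchronous: the responder unmarshals the body straight into an AgentPool, so there is no future to wait on. A sketch with placeholder names and an already authorized client:

```go
package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// printAgentPool reads one agent pool and prints a couple of its fields.
func printAgentPool(ctx context.Context, client containerservice.AgentPoolsClient) error {
	pool, err := client.Get(ctx, "myResourceGroup", "myManagedCluster", "nodepool1")
	if err != nil {
		return err
	}
	if pool.Name != nil {
		fmt.Println("agent pool:", *pool.Name)
	}
	// Count lives on the embedded profile properties, which can be nil.
	if pool.ManagedClusterAgentPoolProfileProperties != nil && pool.Count != nil {
		fmt.Println("node count:", *pool.Count)
	}
	return nil
}
```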
+func (client AgentPoolsClient) GetAvailableAgentPoolVersionsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAvailableAgentPoolVersionsSender sends the GetAvailableAgentPoolVersions request. The method will close the +// http.Response Body if it receives an error. +func (client AgentPoolsClient) GetAvailableAgentPoolVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetAvailableAgentPoolVersionsResponder handles the response to the GetAvailableAgentPoolVersions request. The method always +// closes the http.Response Body. +func (client AgentPoolsClient) GetAvailableAgentPoolVersionsResponder(resp *http.Response) (result AgentPoolAvailableVersions, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetUpgradeProfile gets the details of the upgrade profile for an agent pool with a specified resource group and +// managed cluster name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// agentPoolName - the name of the agent pool. 
+func (client AgentPoolsClient) GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolUpgradeProfile, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.GetUpgradeProfile") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.AgentPoolsClient", "GetUpgradeProfile", err.Error()) + } + + req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName, agentPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", nil, "Failure preparing request") + return + } + + resp, err := client.GetUpgradeProfileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", resp, "Failure sending request") + return + } + + result, err = client.GetUpgradeProfileResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", resp, "Failure responding to request") + } + + return +} + +// GetUpgradeProfilePreparer prepares the GetUpgradeProfile request. +func (client AgentPoolsClient) GetUpgradeProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agentPoolName": autorest.Encode("path", agentPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetUpgradeProfileSender sends the GetUpgradeProfile request. The method will close the +// http.Response Body if it receives an error. +func (client AgentPoolsClient) GetUpgradeProfileSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetUpgradeProfileResponder handles the response to the GetUpgradeProfile request. The method always +// closes the http.Response Body. 
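GetAvailableAgentPoolVersions and GetUpgradeProfile are also plain synchronous reads; together they answer which versions a pool can run and what it can upgrade to. A sketch with placeholder names; only the top-level IDs are printed, to keep the example independent of the nested property layout:

```go
package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// showPoolVersions fetches the cluster-wide available versions and the default
// upgrade profile of a single pool.
func showPoolVersions(ctx context.Context, client containerservice.AgentPoolsClient) error {
	avail, err := client.GetAvailableAgentPoolVersions(ctx, "myResourceGroup", "myManagedCluster")
	if err != nil {
		return err
	}
	if avail.ID != nil {
		fmt.Println("available versions:", *avail.ID)
	}

	profile, err := client.GetUpgradeProfile(ctx, "myResourceGroup", "myManagedCluster", "nodepool1")
	if err != nil {
		return err
	}
	if profile.ID != nil {
		fmt.Println("upgrade profile:", *profile.ID)
	}
	return nil
}
```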
+func (client AgentPoolsClient) GetUpgradeProfileResponder(resp *http.Response) (result AgentPoolUpgradeProfile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of agent pools in the specified managed cluster. The operation returns properties of each agent +// pool. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +func (client AgentPoolsClient) List(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.List") + defer func() { + sc := -1 + if result.aplr.Response.Response != nil { + sc = result.aplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.AgentPoolsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.aplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", resp, "Failure sending request") + return + } + + result.aplr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AgentPoolsClient) ListPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client AgentPoolsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AgentPoolsClient) ListResponder(resp *http.Response) (result AgentPoolListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client AgentPoolsClient) listNextResults(ctx context.Context, lastResults AgentPoolListResult) (result AgentPoolListResult, err error) { + req, err := lastResults.agentPoolListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client AgentPoolsClient) ListComplete(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, resourceName) + return +} diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/client.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/client.go new file mode 100644 index 000000000000..858e168f208f --- /dev/null +++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/client.go @@ -0,0 +1,52 @@ +// Package containerservice implements the Azure ARM Containerservice service API version . +// +// The Container Service Client. +package containerservice + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
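ListComplete wraps the paged List call in an AgentPoolListResultIterator that follows nextLink transparently, so callers never touch listNextResults directly. A sketch with placeholder names:

```go
package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// listAllAgentPools walks every agent pool in one managed cluster across pages.
func listAllAgentPools(ctx context.Context, client containerservice.AgentPoolsClient) error {
	iter, err := client.ListComplete(ctx, "myResourceGroup", "myManagedCluster")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		pool := iter.Value()
		if pool.Name != nil {
			fmt.Println(*pool.Name)
		}
		// NextWithContext advances within the page or fetches the next one.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```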
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Containerservice + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Containerservice. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerserviceapi/interfaces.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerserviceapi/interfaces.go new file mode 100644 index 000000000000..cef9274540ae --- /dev/null +++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerserviceapi/interfaces.go @@ -0,0 +1,93 @@ +package containerserviceapi + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice" +) + +// OpenShiftManagedClustersClientAPI contains the set of methods on the OpenShiftManagedClustersClient type. 
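DefaultBaseURI points at the public Azure Resource Manager endpoint; the ...WithBaseURI constructors exist so the same clients can target sovereign clouds or Azure Stack. A sketch using go-autorest's environment metadata; the environment name and subscription ID are illustrative:

```go
package samples

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
	"github.com/Azure/go-autorest/autorest/azure"
)

// newAgentPoolsClientForCloud builds a client against a non-public cloud's
// resource manager endpoint instead of DefaultBaseURI.
func newAgentPoolsClientForCloud() (containerservice.AgentPoolsClient, error) {
	env, err := azure.EnvironmentFromName("AzureChinaCloud")
	if err != nil {
		return containerservice.AgentPoolsClient{}, err
	}
	client := containerservice.NewAgentPoolsClientWithBaseURI(
		env.ResourceManagerEndpoint, "00000000-0000-0000-0000-000000000000")
	return client, nil
}
```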
+type OpenShiftManagedClustersClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.OpenShiftManagedCluster) (result containerservice.OpenShiftManagedClustersCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.OpenShiftManagedClustersDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.OpenShiftManagedCluster, err error) + List(ctx context.Context) (result containerservice.OpenShiftManagedClusterListResultPage, err error) + ListComplete(ctx context.Context) (result containerservice.OpenShiftManagedClusterListResultIterator, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result containerservice.OpenShiftManagedClusterListResultPage, err error) + ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result containerservice.OpenShiftManagedClusterListResultIterator, err error) + UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.TagsObject) (result containerservice.OpenShiftManagedClustersUpdateTagsFuture, err error) +} + +var _ OpenShiftManagedClustersClientAPI = (*containerservice.OpenShiftManagedClustersClient)(nil) + +// ContainerServicesClientAPI contains the set of methods on the ContainerServicesClient type. +type ContainerServicesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters containerservice.ContainerService) (result containerservice.ContainerServicesCreateOrUpdateFutureType, err error) + Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result containerservice.ContainerServicesDeleteFutureType, err error) + Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result containerservice.ContainerService, err error) + List(ctx context.Context) (result containerservice.ListResultPage, err error) + ListComplete(ctx context.Context) (result containerservice.ListResultIterator, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result containerservice.ListResultPage, err error) + ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result containerservice.ListResultIterator, err error) + ListOrchestrators(ctx context.Context, location string, resourceType string) (result containerservice.OrchestratorVersionProfileListResult, err error) +} + +var _ ContainerServicesClientAPI = (*containerservice.ContainerServicesClient)(nil) + +// OperationsClientAPI contains the set of methods on the OperationsClient type. +type OperationsClientAPI interface { + List(ctx context.Context) (result containerservice.OperationListResult, err error) +} + +var _ OperationsClientAPI = (*containerservice.OperationsClient)(nil) + +// ManagedClustersClientAPI contains the set of methods on the ManagedClustersClient type. 
+type ManagedClustersClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.ManagedCluster) (result containerservice.ManagedClustersCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedClustersDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedCluster, err error) + GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (result containerservice.ManagedClusterAccessProfile, err error) + GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedClusterUpgradeProfile, err error) + List(ctx context.Context) (result containerservice.ManagedClusterListResultPage, err error) + ListComplete(ctx context.Context) (result containerservice.ManagedClusterListResultIterator, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result containerservice.ManagedClusterListResultPage, err error) + ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result containerservice.ManagedClusterListResultIterator, err error) + ListClusterAdminCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.CredentialResults, err error) + ListClusterMonitoringUserCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.CredentialResults, err error) + ListClusterUserCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.CredentialResults, err error) + ResetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.ManagedClusterAADProfile) (result containerservice.ManagedClustersResetAADProfileFuture, err error) + ResetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.ManagedClusterServicePrincipalProfile) (result containerservice.ManagedClustersResetServicePrincipalProfileFuture, err error) + RotateClusterCertificates(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedClustersRotateClusterCertificatesFuture, err error) + UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.TagsObject) (result containerservice.ManagedClustersUpdateTagsFuture, err error) +} + +var _ ManagedClustersClientAPI = (*containerservice.ManagedClustersClient)(nil) + +// AgentPoolsClientAPI contains the set of methods on the AgentPoolsClient type. 
+type AgentPoolsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters containerservice.AgentPool) (result containerservice.AgentPoolsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPoolsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPool, err error) + GetAvailableAgentPoolVersions(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.AgentPoolAvailableVersions, err error) + GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPoolUpgradeProfile, err error) + List(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.AgentPoolListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.AgentPoolListResultIterator, err error) +} + +var _ AgentPoolsClientAPI = (*containerservice.AgentPoolsClient)(nil) diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerservices.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerservices.go new file mode 100644 index 000000000000..c059245f0e0e --- /dev/null +++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerservices.go @@ -0,0 +1,618 @@ +package containerservice + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ContainerServicesClient is the the Container Service Client. +type ContainerServicesClient struct { + BaseClient +} + +// NewContainerServicesClient creates an instance of the ContainerServicesClient client. +func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { + return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). 
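Each ...ClientAPI interface mirrors one client's exported methods, and the compile-time assertions above guarantee the concrete clients keep satisfying them, which makes the interfaces convenient seams for dependency injection and test fakes. A sketch of consuming code written against the interface; the poolCounter type is hypothetical and not part of this change:

```go
package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/containerserviceapi"
)

// poolCounter depends only on the interface, so tests can supply a stub
// instead of a real AgentPoolsClient.
type poolCounter struct {
	pools containerserviceapi.AgentPoolsClientAPI
}

// count walks every agent pool of a cluster through the interface.
func (p poolCounter) count(ctx context.Context, resourceGroup, cluster string) (int, error) {
	iter, err := p.pools.ListComplete(ctx, resourceGroup, cluster)
	if err != nil {
		return 0, err
	}
	n := 0
	for iter.NotDone() {
		n++
		if err := iter.NextWithContext(ctx); err != nil {
			return n, err
		}
	}
	return n, nil
}
```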
+func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { + return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and +// agents. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. +// parameters - parameters supplied to the Create or Update a Container Service operation. +func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (result ContainerServicesCreateOrUpdateFutureType, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.OrchestratorProfile", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Properties.CustomProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.ServicePrincipalProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Properties.ServicePrincipalProfile.KeyVaultSecretRef", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ServicePrincipalProfile.KeyVaultSecretRef.VaultID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Properties.ServicePrincipalProfile.KeyVaultSecretRef.SecretName", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.Properties.MasterProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.WindowsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}}, + {Target: "parameters.Properties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.Properties.LinuxProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[A-Za-z][-A-Za-z0-9_]*$`, Chain: nil}}}, + {Target: "parameters.Properties.LinuxProfile.SSH", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: 
"parameters.Properties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.Properties.DiagnosticsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("containerservice.ContainerServicesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, containerServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFutureType, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified container service in the specified subscription and resource group. 
The operation does +// not delete other resources created as part of creating a container service, including storage accounts, VMs, and +// availability sets. All the other resources created with the container service are part of the same resource group +// and can be deleted individually. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. +func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerServicesDeleteFutureType, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFutureType, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified container service in the specified subscription and resource group. The +// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters +// and agents. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. +func (client ContainerServicesClient) Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerService, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of container services in the specified subscription. The operation returns properties of each +// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents. 
+func (client ContainerServicesClient) List(ctx context.Context) (result ListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.List") + defer func() { + sc := -1 + if result.lr.Response.Response != nil { + sc = result.lr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "List", resp, "Failure sending request") + return + } + + result.lr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ContainerServicesClient) listNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) { + req, err := lastResults.listResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ContainerServicesClient) ListComplete(ctx context.Context) (result ListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The +// operation returns properties of each container service including state, orchestrator, number of masters and agents, +// and FQDNs of masters and agents. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.lr.Response.Response != nil { + sc = result.lr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.lr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.lr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
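Callers that want page-level control, for example to stop early or to process results batch by batch, can drive the ListResultPage returned by List and ListByResourceGroup directly instead of using the Complete iterator. A sketch with a placeholder resource group and an already authorized client:

```go
package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// printContainerServices walks the result pages of one resource group by hand.
func printContainerServices(ctx context.Context, client containerservice.ContainerServicesClient) error {
	page, err := client.ListByResourceGroup(ctx, "myResourceGroup")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, cs := range page.Values() {
			if cs.Name != nil {
				fmt.Println(*cs.Name)
			}
		}
		// NextWithContext follows the nextLink fetched by listByResourceGroupNextResults.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```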
+func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client ContainerServicesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) { + req, err := lastResults.listResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ContainerServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// ListOrchestrators gets a list of supported orchestrators in the specified subscription. 
The operation returns +// properties of each orchestrator including version, available upgrades and whether that version or upgrades are in +// preview. +// Parameters: +// location - the name of a supported Azure region. +// resourceType - resource type for which the list of orchestrators needs to be returned +func (client ContainerServicesClient) ListOrchestrators(ctx context.Context, location string, resourceType string) (result OrchestratorVersionProfileListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.ListOrchestrators") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListOrchestratorsPreparer(ctx, location, resourceType) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListOrchestrators", nil, "Failure preparing request") + return + } + + resp, err := client.ListOrchestratorsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListOrchestrators", resp, "Failure sending request") + return + } + + result, err = client.ListOrchestratorsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListOrchestrators", resp, "Failure responding to request") + } + + return +} + +// ListOrchestratorsPreparer prepares the ListOrchestrators request. +func (client ContainerServicesClient) ListOrchestratorsPreparer(ctx context.Context, location string, resourceType string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-08-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(resourceType) > 0 { + queryParameters["resource-type"] = autorest.Encode("query", resourceType) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/orchestrators", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListOrchestratorsSender sends the ListOrchestrators request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListOrchestratorsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListOrchestratorsResponder handles the response to the ListOrchestrators request. The method always +// closes the http.Response Body. 
+func (client ContainerServicesClient) ListOrchestratorsResponder(resp *http.Response) (result OrchestratorVersionProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/managedclusters.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/managedclusters.go new file mode 100644 index 000000000000..c9f3ea67c162 --- /dev/null +++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/managedclusters.go @@ -0,0 +1,1370 @@ +package containerservice + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ManagedClustersClient is the the Container Service Client. +type ManagedClustersClient struct { + BaseClient +} + +// NewManagedClustersClient creates an instance of the ManagedClustersClient client. +func NewManagedClustersClient(subscriptionID string) ManagedClustersClient { + return NewManagedClustersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewManagedClustersClientWithBaseURI creates an instance of the ManagedClustersClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewManagedClustersClientWithBaseURI(baseURI string, subscriptionID string) ManagedClustersClient { + return ManagedClustersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a managed cluster with the specified configuration for agents and Kubernetes +// version. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Create or Update a Managed Cluster operation. 
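A construction-and-call sketch covering the client constructors above and the ListOrchestrators operation, assuming the azure/auth helper for credentials; the resource-type filter value ("managedClusters") and the field names read off OrchestratorVersionProfileListResult (Orchestrators, OrchestratorType, OrchestratorVersion, Default) are recollections of the generated models rather than anything guaranteed by this diff.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
    "github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
    client := containerservice.NewContainerServicesClient("<subscription id>")
    // For sovereign clouds or Azure Stack, the *WithBaseURI constructors accept
    // a custom endpoint instead of DefaultBaseURI.
    authorizer, err := auth.NewAuthorizerFromEnvironment()
    if err != nil {
        log.Fatal(err)
    }
    client.Authorizer = authorizer

    // "managedClusters" restricts the list to orchestrators usable by managed
    // clusters; an empty string returns everything.
    res, err := client.ListOrchestrators(context.Background(), "eastus", "managedClusters")
    if err != nil {
        log.Fatal(err)
    }
    if res.OrchestratorVersionProfileProperties != nil && res.Orchestrators != nil {
        for _, o := range *res.Orchestrators {
            if o.OrchestratorType == nil || o.OrchestratorVersion == nil {
                continue
            }
            isDefault := o.Default != nil && *o.Default
            fmt.Printf("%s %s (default: %v)\n", *o.OrchestratorType, *o.OrchestratorVersion, isDefault)
        }
    }
}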
+func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster) (result ManagedClustersCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[A-Za-z][-A-Za-z0-9_]*$`, Chain: nil}}}, + {Target: "parameters.ManagedClusterProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.ManagedClusterProperties.WindowsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}}, + }}, + {Target: "parameters.ManagedClusterProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.PodCidr", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.PodCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.ServiceCidr", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.ServiceCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.DNSServiceIP", Name: validation.Null, Rule: false, + Chain: 
[]validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.DNSServiceIP", Name: validation.Pattern, Rule: `^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$`, Chain: nil}}}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.DockerBridgeCidr", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.DockerBridgeCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}, + }}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.AllocatedOutboundPorts", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.AllocatedOutboundPorts", Name: validation.InclusiveMaximum, Rule: int64(64000), Chain: nil}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.AllocatedOutboundPorts", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, + }}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.IdleTimeoutInMinutes", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.IdleTimeoutInMinutes", Name: validation.InclusiveMaximum, Rule: int64(120), Chain: nil}, + {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.IdleTimeoutInMinutes", Name: validation.InclusiveMinimum, Rule: int64(4), Chain: nil}, + }}, + }}, + }}, + {Target: "parameters.ManagedClusterProperties.AadProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.AadProfile.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer 
prepares the CreateOrUpdate request. +func (client ManagedClustersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) CreateOrUpdateSender(req *http.Request) (future ManagedClustersCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the managed cluster with a specified resource group and name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. 
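CreateOrUpdate above is a long-running operation: the returned ManagedClustersCreateOrUpdateFuture wraps the initial response and is polled until provisioning finishes. A hypothetical helper sketch (createCluster is not part of the package), assuming an already-authorized client; the same WaitForCompletionRef pattern applies to the Delete, ResetAADProfile, ResetServicePrincipalProfile and RotateClusterCertificates futures defined later in this file.

package example

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
    "github.com/Azure/go-autorest/autorest/to"
)

// createCluster is a hypothetical helper; client must already carry an Authorizer.
func createCluster(ctx context.Context, client containerservice.ManagedClustersClient, rg, name string) (containerservice.ManagedCluster, error) {
    // Skeletal payload: a real cluster also needs agent pool profiles, a DNS
    // prefix and service principal / identity settings that satisfy the
    // validation constraints enforced above.
    parameters := containerservice.ManagedCluster{
        Location: to.StringPtr("eastus"),
    }

    // CreateOrUpdate returns a future wrapping the initial response; the
    // future is then polled to completion before reading the result.
    future, err := client.CreateOrUpdate(ctx, rg, name, parameters)
    if err != nil {
        return containerservice.ManagedCluster{}, err
    }
    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
        return containerservice.ManagedCluster{}, err
    }
    return future.Result(client)
}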
+func (client ManagedClustersClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ManagedClustersClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) DeleteSender(req *http.Request) (future ManagedClustersDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the details of the managed cluster with a specified resource group and name. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// resourceName - the name of the managed cluster resource. +func (client ManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedCluster, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ManagedClustersClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ManagedClustersClient) GetResponder(resp *http.Response) (result ManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAccessProfile gets the accessProfile for the specified role name of the managed cluster with a specified resource +// group and name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// roleName - the name of the role for managed cluster accessProfile resource. +func (client ManagedClustersClient) GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (result ManagedClusterAccessProfile, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetAccessProfile") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "GetAccessProfile", err.Error()) + } + + req, err := client.GetAccessProfilePreparer(ctx, resourceGroupName, resourceName, roleName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", nil, "Failure preparing request") + return + } + + resp, err := client.GetAccessProfileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", resp, "Failure sending request") + return + } + + result, err = client.GetAccessProfileResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", resp, "Failure responding to request") + } + + return +} + +// GetAccessProfilePreparer prepares the GetAccessProfile request. 
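A small sketch of reading the result of the Get operation earlier in this file; printClusterSummary is a hypothetical helper, the client is assumed to be authorized already, and ProvisioningState/KubernetesVersion/Fqdn are pointer fields on the embedded ManagedClusterProperties, hence the nil checks.

package example

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// printClusterSummary prints a few commonly inspected fields of a managed cluster.
func printClusterSummary(ctx context.Context, client containerservice.ManagedClustersClient, rg, name string) error {
    mc, err := client.Get(ctx, rg, name)
    if err != nil {
        return err
    }
    if mc.ManagedClusterProperties == nil {
        return nil
    }
    if mc.ProvisioningState != nil {
        fmt.Println("state:", *mc.ProvisioningState)
    }
    if mc.KubernetesVersion != nil {
        fmt.Println("version:", *mc.KubernetesVersion)
    }
    if mc.Fqdn != nil {
        fmt.Println("fqdn:", *mc.Fqdn)
    }
    return nil
}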
+func (client ManagedClustersClient) GetAccessProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "roleName": autorest.Encode("path", roleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAccessProfileSender sends the GetAccessProfile request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) GetAccessProfileSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetAccessProfileResponder handles the response to the GetAccessProfile request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) GetAccessProfileResponder(resp *http.Response) (result ManagedClusterAccessProfile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetUpgradeProfile gets the details of the upgrade profile for a managed cluster with a specified resource group and +// name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. 
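GetAccessProfile above returns a kubeconfig for the requested role. A sketch of persisting it, assuming an authorized client, that the embedded AccessProfile exposes a KubeConfig byte slice, and that "clusterUser"/"clusterAdmin" are the role names in use; saveAccessProfileKubeconfig is a hypothetical helper.

package example

import (
    "context"
    "io/ioutil"

    "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// saveAccessProfileKubeconfig writes the kubeconfig returned for the given role
// (e.g. "clusterUser") to path.
func saveAccessProfileKubeconfig(ctx context.Context, client containerservice.ManagedClustersClient, rg, name, role, path string) error {
    profile, err := client.GetAccessProfile(ctx, rg, name, role)
    if err != nil {
        return err
    }
    // KubeConfig is promoted from the embedded *AccessProfile, so check the
    // embedded pointer before touching it.
    if profile.AccessProfile == nil || profile.KubeConfig == nil {
        return nil
    }
    return ioutil.WriteFile(path, *profile.KubeConfig, 0600)
}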
+func (client ManagedClustersClient) GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClusterUpgradeProfile, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetUpgradeProfile") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "GetUpgradeProfile", err.Error()) + } + + req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", nil, "Failure preparing request") + return + } + + resp, err := client.GetUpgradeProfileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", resp, "Failure sending request") + return + } + + result, err = client.GetUpgradeProfileResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", resp, "Failure responding to request") + } + + return +} + +// GetUpgradeProfilePreparer prepares the GetUpgradeProfile request. +func (client ManagedClustersClient) GetUpgradeProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetUpgradeProfileSender sends the GetUpgradeProfile request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) GetUpgradeProfileSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetUpgradeProfileResponder handles the response to the GetUpgradeProfile request. The method always +// closes the http.Response Body. 
+func (client ManagedClustersClient) GetUpgradeProfileResponder(resp *http.Response) (result ManagedClusterUpgradeProfile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of managed clusters in the specified subscription. The operation returns properties of each managed +// cluster. +func (client ManagedClustersClient) List(ctx context.Context) (result ManagedClusterListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.List") + defer func() { + sc := -1 + if result.mclr.Response.Response != nil { + sc = result.mclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.mclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", resp, "Failure sending request") + return + } + + result.mclr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagedClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ListResponder(resp *http.Response) (result ManagedClusterListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
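GetUpgradeProfile above reports which Kubernetes versions a cluster can move to. A sketch that walks the control-plane upgrade list, assuming an authorized client; the nested field names (ControlPlaneProfile, Upgrades, KubernetesVersion, IsPreview) are taken from the generated models and worth double-checking against models.go. printControlPlaneUpgrades is a hypothetical helper.

package example

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// printControlPlaneUpgrades lists the control-plane upgrade targets for a cluster.
func printControlPlaneUpgrades(ctx context.Context, client containerservice.ManagedClustersClient, rg, name string) error {
    up, err := client.GetUpgradeProfile(ctx, rg, name)
    if err != nil {
        return err
    }
    if up.ManagedClusterUpgradeProfileProperties == nil ||
        up.ControlPlaneProfile == nil || up.ControlPlaneProfile.Upgrades == nil {
        return nil
    }
    for _, u := range *up.ControlPlaneProfile.Upgrades {
        if u.KubernetesVersion == nil {
            continue
        }
        preview := u.IsPreview != nil && *u.IsPreview
        fmt.Printf("%s (preview: %v)\n", *u.KubernetesVersion, preview)
    }
    return nil
}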
+func (client ManagedClustersClient) listNextResults(ctx context.Context, lastResults ManagedClusterListResult) (result ManagedClusterListResult, err error) { + req, err := lastResults.managedClusterListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ManagedClustersClient) ListComplete(ctx context.Context) (result ManagedClusterListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists managed clusters in the specified subscription and resource group. The operation returns +// properties of each managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client ManagedClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ManagedClusterListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.mclr.Response.Response != nil { + sc = result.mclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.mclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.mclr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client ManagedClustersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ListByResourceGroupResponder(resp *http.Response) (result ManagedClusterListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client ManagedClustersClient) listByResourceGroupNextResults(ctx context.Context, lastResults ManagedClusterListResult) (result ManagedClusterListResult, err error) { + req, err := lastResults.managedClusterListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ManagedClusterListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// ListClusterAdminCredentials gets cluster admin credential of the managed cluster with a specified resource group and +// name. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +func (client ManagedClustersClient) ListClusterAdminCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result CredentialResults, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterAdminCredentials") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterAdminCredentials", err.Error()) + } + + req, err := client.ListClusterAdminCredentialsPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", nil, "Failure preparing request") + return + } + + resp, err := client.ListClusterAdminCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", resp, "Failure sending request") + return + } + + result, err = client.ListClusterAdminCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", resp, "Failure responding to request") + } + + return +} + +// ListClusterAdminCredentialsPreparer prepares the ListClusterAdminCredentials request. +func (client ManagedClustersClient) ListClusterAdminCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListClusterAdminCredentialsSender sends the ListClusterAdminCredentials request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagedClustersClient) ListClusterAdminCredentialsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListClusterAdminCredentialsResponder handles the response to the ListClusterAdminCredentials request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ListClusterAdminCredentialsResponder(resp *http.Response) (result CredentialResults, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListClusterMonitoringUserCredentials gets cluster monitoring user credential of the managed cluster with a specified +// resource group and name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +func (client ManagedClustersClient) ListClusterMonitoringUserCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result CredentialResults, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterMonitoringUserCredentials") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", err.Error()) + } + + req, err := client.ListClusterMonitoringUserCredentialsPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", nil, "Failure preparing request") + return + } + + resp, err := client.ListClusterMonitoringUserCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", resp, "Failure sending request") + return + } + + result, err = client.ListClusterMonitoringUserCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", resp, "Failure responding to request") + } + + return +} + +// ListClusterMonitoringUserCredentialsPreparer prepares the ListClusterMonitoringUserCredentials request. 
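ListClusterAdminCredentials above returns the admin kubeconfig(s) for a cluster. A sketch of writing the first one to disk, assuming an authorized client and that CredentialResults carries a Kubeconfigs slice of Name/Value entries; saveAdminKubeconfig is a hypothetical helper, and the 0600 file mode is just a sensible default for credential material.

package example

import (
    "context"
    "io/ioutil"

    "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// saveAdminKubeconfig writes the cluster admin kubeconfig to path.
func saveAdminKubeconfig(ctx context.Context, client containerservice.ManagedClustersClient, rg, name, path string) error {
    creds, err := client.ListClusterAdminCredentials(ctx, rg, name)
    if err != nil {
        return err
    }
    if creds.Kubeconfigs == nil || len(*creds.Kubeconfigs) == 0 {
        return nil
    }
    // The service normally returns a single admin kubeconfig; take the first.
    kc := (*creds.Kubeconfigs)[0]
    if kc.Value == nil {
        return nil
    }
    return ioutil.WriteFile(path, *kc.Value, 0600)
}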
+func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListClusterMonitoringUserCredentialsSender sends the ListClusterMonitoringUserCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListClusterMonitoringUserCredentialsResponder handles the response to the ListClusterMonitoringUserCredentials request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsResponder(resp *http.Response) (result CredentialResults, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListClusterUserCredentials gets cluster user credential of the managed cluster with a specified resource group and +// name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. 
+func (client ManagedClustersClient) ListClusterUserCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result CredentialResults, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterUserCredentials") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterUserCredentials", err.Error()) + } + + req, err := client.ListClusterUserCredentialsPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", nil, "Failure preparing request") + return + } + + resp, err := client.ListClusterUserCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", resp, "Failure sending request") + return + } + + result, err = client.ListClusterUserCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", resp, "Failure responding to request") + } + + return +} + +// ListClusterUserCredentialsPreparer prepares the ListClusterUserCredentials request. +func (client ManagedClustersClient) ListClusterUserCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListClusterUserCredentialsSender sends the ListClusterUserCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) ListClusterUserCredentialsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListClusterUserCredentialsResponder handles the response to the ListClusterUserCredentials request. The method always +// closes the http.Response Body. 
+func (client ManagedClustersClient) ListClusterUserCredentialsResponder(resp *http.Response) (result CredentialResults, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ResetAADProfile update the AAD Profile for a managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Reset AAD Profile operation for a Managed Cluster. +func (client ManagedClustersClient) ResetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (result ManagedClustersResetAADProfileFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetAADProfile") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ResetAADProfile", err.Error()) + } + + req, err := client.ResetAADProfilePreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", nil, "Failure preparing request") + return + } + + result, err = client.ResetAADProfileSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetAADProfilePreparer prepares the ResetAADProfile request. 
+func (client ManagedClustersClient) ResetAADProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetAADProfileSender sends the ResetAADProfile request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) ResetAADProfileSender(req *http.Request) (future ManagedClustersResetAADProfileFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetAADProfileResponder handles the response to the ResetAADProfile request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ResetAADProfileResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ResetServicePrincipalProfile update the service principal Profile for a managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Reset Service Principal Profile operation for a Managed Cluster. 
+func (client ManagedClustersClient) ResetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (result ManagedClustersResetServicePrincipalProfileFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetServicePrincipalProfile") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ClientID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", err.Error()) + } + + req, err := client.ResetServicePrincipalProfilePreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", nil, "Failure preparing request") + return + } + + result, err = client.ResetServicePrincipalProfileSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetServicePrincipalProfilePreparer prepares the ResetServicePrincipalProfile request. +func (client ManagedClustersClient) ResetServicePrincipalProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetServicePrincipalProfileSender sends the ResetServicePrincipalProfile request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagedClustersClient) ResetServicePrincipalProfileSender(req *http.Request) (future ManagedClustersResetServicePrincipalProfileFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetServicePrincipalProfileResponder handles the response to the ResetServicePrincipalProfile request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) ResetServicePrincipalProfileResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// RotateClusterCertificates rotate certificates of a managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +func (client ManagedClustersClient) RotateClusterCertificates(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersRotateClusterCertificatesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.RotateClusterCertificates") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "RotateClusterCertificates", err.Error()) + } + + req, err := client.RotateClusterCertificatesPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RotateClusterCertificates", nil, "Failure preparing request") + return + } + + result, err = client.RotateClusterCertificatesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RotateClusterCertificates", result.Response(), "Failure sending request") + return + } + + return +} + +// RotateClusterCertificatesPreparer prepares the RotateClusterCertificates request. 
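+// Unlike the two reset operations above, the prepared request carries no JSON
+// body: it is a plain POST to the cluster's rotateClusterCertificates endpoint
+// with only the api-version query parameter.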
+func (client ManagedClustersClient) RotateClusterCertificatesPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RotateClusterCertificatesSender sends the RotateClusterCertificates request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) RotateClusterCertificatesSender(req *http.Request) (future ManagedClustersRotateClusterCertificatesFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// RotateClusterCertificatesResponder handles the response to the RotateClusterCertificates request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) RotateClusterCertificatesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// UpdateTags updates a managed cluster with the specified tags. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Update Managed Cluster Tags operation. 
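+//
+// TagsObject wraps a plain map of tag names to values; a hedged construction
+// sketch (the tag names and values here are examples only):
+//
+//   tags := containerservice.TagsObject{
+//       Tags: map[string]*string{
+//           "environment": to.StringPtr("dev"),
+//           "owner":       to.StringPtr("platform-team"),
+//       },
+//   }
+//   future, err := client.UpdateTags(ctx, "rg1", "aks1", tags)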
+func (client ManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result ManagedClustersUpdateTagsFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.UpdateTags") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceName, + Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.ManagedClustersClient", "UpdateTags", err.Error()) + } + + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request") + return + } + + result, err = client.UpdateTagsSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. +func (client ManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateTagsSender sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) UpdateTagsSender(req *http.Request) (future ManagedClustersUpdateTagsFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateTagsResponder handles the response to the UpdateTags request. The method always +// closes the http.Response Body. 
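+// Callers typically do not invoke this responder directly; it is called by
+// ManagedClustersUpdateTagsFuture.Result once the long-running operation has
+// completed, which is how the final ManagedCluster is surfaced to the caller.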
+func (client ManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result ManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/models.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/models.go new file mode 100644 index 000000000000..c6b811dc3eec --- /dev/null +++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/models.go @@ -0,0 +1,3532 @@ +package containerservice + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice" + +// AgentPoolType enumerates the values for agent pool type. +type AgentPoolType string + +const ( + // AvailabilitySet ... + AvailabilitySet AgentPoolType = "AvailabilitySet" + // VirtualMachineScaleSets ... + VirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets" +) + +// PossibleAgentPoolTypeValues returns an array of possible values for the AgentPoolType const type. +func PossibleAgentPoolTypeValues() []AgentPoolType { + return []AgentPoolType{AvailabilitySet, VirtualMachineScaleSets} +} + +// Kind enumerates the values for kind. +type Kind string + +const ( + // KindAADIdentityProvider ... + KindAADIdentityProvider Kind = "AADIdentityProvider" + // KindOpenShiftManagedClusterBaseIdentityProvider ... + KindOpenShiftManagedClusterBaseIdentityProvider Kind = "OpenShiftManagedClusterBaseIdentityProvider" +) + +// PossibleKindValues returns an array of possible values for the Kind const type. +func PossibleKindValues() []Kind { + return []Kind{KindAADIdentityProvider, KindOpenShiftManagedClusterBaseIdentityProvider} +} + +// LoadBalancerSku enumerates the values for load balancer sku. +type LoadBalancerSku string + +const ( + // Basic ... + Basic LoadBalancerSku = "basic" + // Standard ... + Standard LoadBalancerSku = "standard" +) + +// PossibleLoadBalancerSkuValues returns an array of possible values for the LoadBalancerSku const type. +func PossibleLoadBalancerSkuValues() []LoadBalancerSku { + return []LoadBalancerSku{Basic, Standard} +} + +// NetworkPlugin enumerates the values for network plugin. 
+type NetworkPlugin string + +const ( + // Azure ... + Azure NetworkPlugin = "azure" + // Kubenet ... + Kubenet NetworkPlugin = "kubenet" +) + +// PossibleNetworkPluginValues returns an array of possible values for the NetworkPlugin const type. +func PossibleNetworkPluginValues() []NetworkPlugin { + return []NetworkPlugin{Azure, Kubenet} +} + +// NetworkPolicy enumerates the values for network policy. +type NetworkPolicy string + +const ( + // NetworkPolicyAzure ... + NetworkPolicyAzure NetworkPolicy = "azure" + // NetworkPolicyCalico ... + NetworkPolicyCalico NetworkPolicy = "calico" +) + +// PossibleNetworkPolicyValues returns an array of possible values for the NetworkPolicy const type. +func PossibleNetworkPolicyValues() []NetworkPolicy { + return []NetworkPolicy{NetworkPolicyAzure, NetworkPolicyCalico} +} + +// OpenShiftAgentPoolProfileRole enumerates the values for open shift agent pool profile role. +type OpenShiftAgentPoolProfileRole string + +const ( + // Compute ... + Compute OpenShiftAgentPoolProfileRole = "compute" + // Infra ... + Infra OpenShiftAgentPoolProfileRole = "infra" +) + +// PossibleOpenShiftAgentPoolProfileRoleValues returns an array of possible values for the OpenShiftAgentPoolProfileRole const type. +func PossibleOpenShiftAgentPoolProfileRoleValues() []OpenShiftAgentPoolProfileRole { + return []OpenShiftAgentPoolProfileRole{Compute, Infra} +} + +// OpenShiftContainerServiceVMSize enumerates the values for open shift container service vm size. +type OpenShiftContainerServiceVMSize string + +const ( + // StandardD16sV3 ... + StandardD16sV3 OpenShiftContainerServiceVMSize = "Standard_D16s_v3" + // StandardD2sV3 ... + StandardD2sV3 OpenShiftContainerServiceVMSize = "Standard_D2s_v3" + // StandardD32sV3 ... + StandardD32sV3 OpenShiftContainerServiceVMSize = "Standard_D32s_v3" + // StandardD4sV3 ... + StandardD4sV3 OpenShiftContainerServiceVMSize = "Standard_D4s_v3" + // StandardD64sV3 ... + StandardD64sV3 OpenShiftContainerServiceVMSize = "Standard_D64s_v3" + // StandardD8sV3 ... + StandardD8sV3 OpenShiftContainerServiceVMSize = "Standard_D8s_v3" + // StandardDS12V2 ... + StandardDS12V2 OpenShiftContainerServiceVMSize = "Standard_DS12_v2" + // StandardDS13V2 ... + StandardDS13V2 OpenShiftContainerServiceVMSize = "Standard_DS13_v2" + // StandardDS14V2 ... + StandardDS14V2 OpenShiftContainerServiceVMSize = "Standard_DS14_v2" + // StandardDS15V2 ... + StandardDS15V2 OpenShiftContainerServiceVMSize = "Standard_DS15_v2" + // StandardDS4V2 ... + StandardDS4V2 OpenShiftContainerServiceVMSize = "Standard_DS4_v2" + // StandardDS5V2 ... + StandardDS5V2 OpenShiftContainerServiceVMSize = "Standard_DS5_v2" + // StandardE16sV3 ... + StandardE16sV3 OpenShiftContainerServiceVMSize = "Standard_E16s_v3" + // StandardE20sV3 ... + StandardE20sV3 OpenShiftContainerServiceVMSize = "Standard_E20s_v3" + // StandardE32sV3 ... + StandardE32sV3 OpenShiftContainerServiceVMSize = "Standard_E32s_v3" + // StandardE4sV3 ... + StandardE4sV3 OpenShiftContainerServiceVMSize = "Standard_E4s_v3" + // StandardE64sV3 ... + StandardE64sV3 OpenShiftContainerServiceVMSize = "Standard_E64s_v3" + // StandardE8sV3 ... + StandardE8sV3 OpenShiftContainerServiceVMSize = "Standard_E8s_v3" + // StandardF16s ... + StandardF16s OpenShiftContainerServiceVMSize = "Standard_F16s" + // StandardF16sV2 ... + StandardF16sV2 OpenShiftContainerServiceVMSize = "Standard_F16s_v2" + // StandardF32sV2 ... + StandardF32sV2 OpenShiftContainerServiceVMSize = "Standard_F32s_v2" + // StandardF64sV2 ... 
+ StandardF64sV2 OpenShiftContainerServiceVMSize = "Standard_F64s_v2" + // StandardF72sV2 ... + StandardF72sV2 OpenShiftContainerServiceVMSize = "Standard_F72s_v2" + // StandardF8s ... + StandardF8s OpenShiftContainerServiceVMSize = "Standard_F8s" + // StandardF8sV2 ... + StandardF8sV2 OpenShiftContainerServiceVMSize = "Standard_F8s_v2" + // StandardGS2 ... + StandardGS2 OpenShiftContainerServiceVMSize = "Standard_GS2" + // StandardGS3 ... + StandardGS3 OpenShiftContainerServiceVMSize = "Standard_GS3" + // StandardGS4 ... + StandardGS4 OpenShiftContainerServiceVMSize = "Standard_GS4" + // StandardGS5 ... + StandardGS5 OpenShiftContainerServiceVMSize = "Standard_GS5" + // StandardL16s ... + StandardL16s OpenShiftContainerServiceVMSize = "Standard_L16s" + // StandardL32s ... + StandardL32s OpenShiftContainerServiceVMSize = "Standard_L32s" + // StandardL4s ... + StandardL4s OpenShiftContainerServiceVMSize = "Standard_L4s" + // StandardL8s ... + StandardL8s OpenShiftContainerServiceVMSize = "Standard_L8s" +) + +// PossibleOpenShiftContainerServiceVMSizeValues returns an array of possible values for the OpenShiftContainerServiceVMSize const type. +func PossibleOpenShiftContainerServiceVMSizeValues() []OpenShiftContainerServiceVMSize { + return []OpenShiftContainerServiceVMSize{StandardD16sV3, StandardD2sV3, StandardD32sV3, StandardD4sV3, StandardD64sV3, StandardD8sV3, StandardDS12V2, StandardDS13V2, StandardDS14V2, StandardDS15V2, StandardDS4V2, StandardDS5V2, StandardE16sV3, StandardE20sV3, StandardE32sV3, StandardE4sV3, StandardE64sV3, StandardE8sV3, StandardF16s, StandardF16sV2, StandardF32sV2, StandardF64sV2, StandardF72sV2, StandardF8s, StandardF8sV2, StandardGS2, StandardGS3, StandardGS4, StandardGS5, StandardL16s, StandardL32s, StandardL4s, StandardL8s} +} + +// OrchestratorTypes enumerates the values for orchestrator types. +type OrchestratorTypes string + +const ( + // Custom ... + Custom OrchestratorTypes = "Custom" + // DCOS ... + DCOS OrchestratorTypes = "DCOS" + // DockerCE ... + DockerCE OrchestratorTypes = "DockerCE" + // Kubernetes ... + Kubernetes OrchestratorTypes = "Kubernetes" + // Swarm ... + Swarm OrchestratorTypes = "Swarm" +) + +// PossibleOrchestratorTypesValues returns an array of possible values for the OrchestratorTypes const type. +func PossibleOrchestratorTypesValues() []OrchestratorTypes { + return []OrchestratorTypes{Custom, DCOS, DockerCE, Kubernetes, Swarm} +} + +// OSType enumerates the values for os type. +type OSType string + +const ( + // Linux ... + Linux OSType = "Linux" + // Windows ... + Windows OSType = "Windows" +) + +// PossibleOSTypeValues returns an array of possible values for the OSType const type. +func PossibleOSTypeValues() []OSType { + return []OSType{Linux, Windows} +} + +// OutboundType enumerates the values for outbound type. +type OutboundType string + +const ( + // LoadBalancer ... + LoadBalancer OutboundType = "loadBalancer" + // UserDefinedRouting ... + UserDefinedRouting OutboundType = "userDefinedRouting" +) + +// PossibleOutboundTypeValues returns an array of possible values for the OutboundType const type. +func PossibleOutboundTypeValues() []OutboundType { + return []OutboundType{LoadBalancer, UserDefinedRouting} +} + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // None ... + None ResourceIdentityType = "None" + // SystemAssigned ... 
+ SystemAssigned ResourceIdentityType = "SystemAssigned" +) + +// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{None, SystemAssigned} +} + +// ScaleSetEvictionPolicy enumerates the values for scale set eviction policy. +type ScaleSetEvictionPolicy string + +const ( + // Deallocate ... + Deallocate ScaleSetEvictionPolicy = "Deallocate" + // Delete ... + Delete ScaleSetEvictionPolicy = "Delete" +) + +// PossibleScaleSetEvictionPolicyValues returns an array of possible values for the ScaleSetEvictionPolicy const type. +func PossibleScaleSetEvictionPolicyValues() []ScaleSetEvictionPolicy { + return []ScaleSetEvictionPolicy{Deallocate, Delete} +} + +// ScaleSetPriority enumerates the values for scale set priority. +type ScaleSetPriority string + +const ( + // Low ... + Low ScaleSetPriority = "Low" + // Regular ... + Regular ScaleSetPriority = "Regular" +) + +// PossibleScaleSetPriorityValues returns an array of possible values for the ScaleSetPriority const type. +func PossibleScaleSetPriorityValues() []ScaleSetPriority { + return []ScaleSetPriority{Low, Regular} +} + +// StorageProfileTypes enumerates the values for storage profile types. +type StorageProfileTypes string + +const ( + // ManagedDisks ... + ManagedDisks StorageProfileTypes = "ManagedDisks" + // StorageAccount ... + StorageAccount StorageProfileTypes = "StorageAccount" +) + +// PossibleStorageProfileTypesValues returns an array of possible values for the StorageProfileTypes const type. +func PossibleStorageProfileTypesValues() []StorageProfileTypes { + return []StorageProfileTypes{ManagedDisks, StorageAccount} +} + +// VMSizeTypes enumerates the values for vm size types. +type VMSizeTypes string + +const ( + // VMSizeTypesStandardA1 ... + VMSizeTypesStandardA1 VMSizeTypes = "Standard_A1" + // VMSizeTypesStandardA10 ... + VMSizeTypesStandardA10 VMSizeTypes = "Standard_A10" + // VMSizeTypesStandardA11 ... + VMSizeTypesStandardA11 VMSizeTypes = "Standard_A11" + // VMSizeTypesStandardA1V2 ... + VMSizeTypesStandardA1V2 VMSizeTypes = "Standard_A1_v2" + // VMSizeTypesStandardA2 ... + VMSizeTypesStandardA2 VMSizeTypes = "Standard_A2" + // VMSizeTypesStandardA2mV2 ... + VMSizeTypesStandardA2mV2 VMSizeTypes = "Standard_A2m_v2" + // VMSizeTypesStandardA2V2 ... + VMSizeTypesStandardA2V2 VMSizeTypes = "Standard_A2_v2" + // VMSizeTypesStandardA3 ... + VMSizeTypesStandardA3 VMSizeTypes = "Standard_A3" + // VMSizeTypesStandardA4 ... + VMSizeTypesStandardA4 VMSizeTypes = "Standard_A4" + // VMSizeTypesStandardA4mV2 ... + VMSizeTypesStandardA4mV2 VMSizeTypes = "Standard_A4m_v2" + // VMSizeTypesStandardA4V2 ... + VMSizeTypesStandardA4V2 VMSizeTypes = "Standard_A4_v2" + // VMSizeTypesStandardA5 ... + VMSizeTypesStandardA5 VMSizeTypes = "Standard_A5" + // VMSizeTypesStandardA6 ... + VMSizeTypesStandardA6 VMSizeTypes = "Standard_A6" + // VMSizeTypesStandardA7 ... + VMSizeTypesStandardA7 VMSizeTypes = "Standard_A7" + // VMSizeTypesStandardA8 ... + VMSizeTypesStandardA8 VMSizeTypes = "Standard_A8" + // VMSizeTypesStandardA8mV2 ... + VMSizeTypesStandardA8mV2 VMSizeTypes = "Standard_A8m_v2" + // VMSizeTypesStandardA8V2 ... + VMSizeTypesStandardA8V2 VMSizeTypes = "Standard_A8_v2" + // VMSizeTypesStandardA9 ... + VMSizeTypesStandardA9 VMSizeTypes = "Standard_A9" + // VMSizeTypesStandardB2ms ... + VMSizeTypesStandardB2ms VMSizeTypes = "Standard_B2ms" + // VMSizeTypesStandardB2s ... 
+ VMSizeTypesStandardB2s VMSizeTypes = "Standard_B2s" + // VMSizeTypesStandardB4ms ... + VMSizeTypesStandardB4ms VMSizeTypes = "Standard_B4ms" + // VMSizeTypesStandardB8ms ... + VMSizeTypesStandardB8ms VMSizeTypes = "Standard_B8ms" + // VMSizeTypesStandardD1 ... + VMSizeTypesStandardD1 VMSizeTypes = "Standard_D1" + // VMSizeTypesStandardD11 ... + VMSizeTypesStandardD11 VMSizeTypes = "Standard_D11" + // VMSizeTypesStandardD11V2 ... + VMSizeTypesStandardD11V2 VMSizeTypes = "Standard_D11_v2" + // VMSizeTypesStandardD11V2Promo ... + VMSizeTypesStandardD11V2Promo VMSizeTypes = "Standard_D11_v2_Promo" + // VMSizeTypesStandardD12 ... + VMSizeTypesStandardD12 VMSizeTypes = "Standard_D12" + // VMSizeTypesStandardD12V2 ... + VMSizeTypesStandardD12V2 VMSizeTypes = "Standard_D12_v2" + // VMSizeTypesStandardD12V2Promo ... + VMSizeTypesStandardD12V2Promo VMSizeTypes = "Standard_D12_v2_Promo" + // VMSizeTypesStandardD13 ... + VMSizeTypesStandardD13 VMSizeTypes = "Standard_D13" + // VMSizeTypesStandardD13V2 ... + VMSizeTypesStandardD13V2 VMSizeTypes = "Standard_D13_v2" + // VMSizeTypesStandardD13V2Promo ... + VMSizeTypesStandardD13V2Promo VMSizeTypes = "Standard_D13_v2_Promo" + // VMSizeTypesStandardD14 ... + VMSizeTypesStandardD14 VMSizeTypes = "Standard_D14" + // VMSizeTypesStandardD14V2 ... + VMSizeTypesStandardD14V2 VMSizeTypes = "Standard_D14_v2" + // VMSizeTypesStandardD14V2Promo ... + VMSizeTypesStandardD14V2Promo VMSizeTypes = "Standard_D14_v2_Promo" + // VMSizeTypesStandardD15V2 ... + VMSizeTypesStandardD15V2 VMSizeTypes = "Standard_D15_v2" + // VMSizeTypesStandardD16sV3 ... + VMSizeTypesStandardD16sV3 VMSizeTypes = "Standard_D16s_v3" + // VMSizeTypesStandardD16V3 ... + VMSizeTypesStandardD16V3 VMSizeTypes = "Standard_D16_v3" + // VMSizeTypesStandardD1V2 ... + VMSizeTypesStandardD1V2 VMSizeTypes = "Standard_D1_v2" + // VMSizeTypesStandardD2 ... + VMSizeTypesStandardD2 VMSizeTypes = "Standard_D2" + // VMSizeTypesStandardD2sV3 ... + VMSizeTypesStandardD2sV3 VMSizeTypes = "Standard_D2s_v3" + // VMSizeTypesStandardD2V2 ... + VMSizeTypesStandardD2V2 VMSizeTypes = "Standard_D2_v2" + // VMSizeTypesStandardD2V2Promo ... + VMSizeTypesStandardD2V2Promo VMSizeTypes = "Standard_D2_v2_Promo" + // VMSizeTypesStandardD2V3 ... + VMSizeTypesStandardD2V3 VMSizeTypes = "Standard_D2_v3" + // VMSizeTypesStandardD3 ... + VMSizeTypesStandardD3 VMSizeTypes = "Standard_D3" + // VMSizeTypesStandardD32sV3 ... + VMSizeTypesStandardD32sV3 VMSizeTypes = "Standard_D32s_v3" + // VMSizeTypesStandardD32V3 ... + VMSizeTypesStandardD32V3 VMSizeTypes = "Standard_D32_v3" + // VMSizeTypesStandardD3V2 ... + VMSizeTypesStandardD3V2 VMSizeTypes = "Standard_D3_v2" + // VMSizeTypesStandardD3V2Promo ... + VMSizeTypesStandardD3V2Promo VMSizeTypes = "Standard_D3_v2_Promo" + // VMSizeTypesStandardD4 ... + VMSizeTypesStandardD4 VMSizeTypes = "Standard_D4" + // VMSizeTypesStandardD4sV3 ... + VMSizeTypesStandardD4sV3 VMSizeTypes = "Standard_D4s_v3" + // VMSizeTypesStandardD4V2 ... + VMSizeTypesStandardD4V2 VMSizeTypes = "Standard_D4_v2" + // VMSizeTypesStandardD4V2Promo ... + VMSizeTypesStandardD4V2Promo VMSizeTypes = "Standard_D4_v2_Promo" + // VMSizeTypesStandardD4V3 ... + VMSizeTypesStandardD4V3 VMSizeTypes = "Standard_D4_v3" + // VMSizeTypesStandardD5V2 ... + VMSizeTypesStandardD5V2 VMSizeTypes = "Standard_D5_v2" + // VMSizeTypesStandardD5V2Promo ... + VMSizeTypesStandardD5V2Promo VMSizeTypes = "Standard_D5_v2_Promo" + // VMSizeTypesStandardD64sV3 ... 
+ VMSizeTypesStandardD64sV3 VMSizeTypes = "Standard_D64s_v3" + // VMSizeTypesStandardD64V3 ... + VMSizeTypesStandardD64V3 VMSizeTypes = "Standard_D64_v3" + // VMSizeTypesStandardD8sV3 ... + VMSizeTypesStandardD8sV3 VMSizeTypes = "Standard_D8s_v3" + // VMSizeTypesStandardD8V3 ... + VMSizeTypesStandardD8V3 VMSizeTypes = "Standard_D8_v3" + // VMSizeTypesStandardDS1 ... + VMSizeTypesStandardDS1 VMSizeTypes = "Standard_DS1" + // VMSizeTypesStandardDS11 ... + VMSizeTypesStandardDS11 VMSizeTypes = "Standard_DS11" + // VMSizeTypesStandardDS11V2 ... + VMSizeTypesStandardDS11V2 VMSizeTypes = "Standard_DS11_v2" + // VMSizeTypesStandardDS11V2Promo ... + VMSizeTypesStandardDS11V2Promo VMSizeTypes = "Standard_DS11_v2_Promo" + // VMSizeTypesStandardDS12 ... + VMSizeTypesStandardDS12 VMSizeTypes = "Standard_DS12" + // VMSizeTypesStandardDS12V2 ... + VMSizeTypesStandardDS12V2 VMSizeTypes = "Standard_DS12_v2" + // VMSizeTypesStandardDS12V2Promo ... + VMSizeTypesStandardDS12V2Promo VMSizeTypes = "Standard_DS12_v2_Promo" + // VMSizeTypesStandardDS13 ... + VMSizeTypesStandardDS13 VMSizeTypes = "Standard_DS13" + // VMSizeTypesStandardDS132V2 ... + VMSizeTypesStandardDS132V2 VMSizeTypes = "Standard_DS13-2_v2" + // VMSizeTypesStandardDS134V2 ... + VMSizeTypesStandardDS134V2 VMSizeTypes = "Standard_DS13-4_v2" + // VMSizeTypesStandardDS13V2 ... + VMSizeTypesStandardDS13V2 VMSizeTypes = "Standard_DS13_v2" + // VMSizeTypesStandardDS13V2Promo ... + VMSizeTypesStandardDS13V2Promo VMSizeTypes = "Standard_DS13_v2_Promo" + // VMSizeTypesStandardDS14 ... + VMSizeTypesStandardDS14 VMSizeTypes = "Standard_DS14" + // VMSizeTypesStandardDS144V2 ... + VMSizeTypesStandardDS144V2 VMSizeTypes = "Standard_DS14-4_v2" + // VMSizeTypesStandardDS148V2 ... + VMSizeTypesStandardDS148V2 VMSizeTypes = "Standard_DS14-8_v2" + // VMSizeTypesStandardDS14V2 ... + VMSizeTypesStandardDS14V2 VMSizeTypes = "Standard_DS14_v2" + // VMSizeTypesStandardDS14V2Promo ... + VMSizeTypesStandardDS14V2Promo VMSizeTypes = "Standard_DS14_v2_Promo" + // VMSizeTypesStandardDS15V2 ... + VMSizeTypesStandardDS15V2 VMSizeTypes = "Standard_DS15_v2" + // VMSizeTypesStandardDS1V2 ... + VMSizeTypesStandardDS1V2 VMSizeTypes = "Standard_DS1_v2" + // VMSizeTypesStandardDS2 ... + VMSizeTypesStandardDS2 VMSizeTypes = "Standard_DS2" + // VMSizeTypesStandardDS2V2 ... + VMSizeTypesStandardDS2V2 VMSizeTypes = "Standard_DS2_v2" + // VMSizeTypesStandardDS2V2Promo ... + VMSizeTypesStandardDS2V2Promo VMSizeTypes = "Standard_DS2_v2_Promo" + // VMSizeTypesStandardDS3 ... + VMSizeTypesStandardDS3 VMSizeTypes = "Standard_DS3" + // VMSizeTypesStandardDS3V2 ... + VMSizeTypesStandardDS3V2 VMSizeTypes = "Standard_DS3_v2" + // VMSizeTypesStandardDS3V2Promo ... + VMSizeTypesStandardDS3V2Promo VMSizeTypes = "Standard_DS3_v2_Promo" + // VMSizeTypesStandardDS4 ... + VMSizeTypesStandardDS4 VMSizeTypes = "Standard_DS4" + // VMSizeTypesStandardDS4V2 ... + VMSizeTypesStandardDS4V2 VMSizeTypes = "Standard_DS4_v2" + // VMSizeTypesStandardDS4V2Promo ... + VMSizeTypesStandardDS4V2Promo VMSizeTypes = "Standard_DS4_v2_Promo" + // VMSizeTypesStandardDS5V2 ... + VMSizeTypesStandardDS5V2 VMSizeTypes = "Standard_DS5_v2" + // VMSizeTypesStandardDS5V2Promo ... + VMSizeTypesStandardDS5V2Promo VMSizeTypes = "Standard_DS5_v2_Promo" + // VMSizeTypesStandardE16sV3 ... + VMSizeTypesStandardE16sV3 VMSizeTypes = "Standard_E16s_v3" + // VMSizeTypesStandardE16V3 ... + VMSizeTypesStandardE16V3 VMSizeTypes = "Standard_E16_v3" + // VMSizeTypesStandardE2sV3 ... 
+ VMSizeTypesStandardE2sV3 VMSizeTypes = "Standard_E2s_v3" + // VMSizeTypesStandardE2V3 ... + VMSizeTypesStandardE2V3 VMSizeTypes = "Standard_E2_v3" + // VMSizeTypesStandardE3216sV3 ... + VMSizeTypesStandardE3216sV3 VMSizeTypes = "Standard_E32-16s_v3" + // VMSizeTypesStandardE328sV3 ... + VMSizeTypesStandardE328sV3 VMSizeTypes = "Standard_E32-8s_v3" + // VMSizeTypesStandardE32sV3 ... + VMSizeTypesStandardE32sV3 VMSizeTypes = "Standard_E32s_v3" + // VMSizeTypesStandardE32V3 ... + VMSizeTypesStandardE32V3 VMSizeTypes = "Standard_E32_v3" + // VMSizeTypesStandardE4sV3 ... + VMSizeTypesStandardE4sV3 VMSizeTypes = "Standard_E4s_v3" + // VMSizeTypesStandardE4V3 ... + VMSizeTypesStandardE4V3 VMSizeTypes = "Standard_E4_v3" + // VMSizeTypesStandardE6416sV3 ... + VMSizeTypesStandardE6416sV3 VMSizeTypes = "Standard_E64-16s_v3" + // VMSizeTypesStandardE6432sV3 ... + VMSizeTypesStandardE6432sV3 VMSizeTypes = "Standard_E64-32s_v3" + // VMSizeTypesStandardE64sV3 ... + VMSizeTypesStandardE64sV3 VMSizeTypes = "Standard_E64s_v3" + // VMSizeTypesStandardE64V3 ... + VMSizeTypesStandardE64V3 VMSizeTypes = "Standard_E64_v3" + // VMSizeTypesStandardE8sV3 ... + VMSizeTypesStandardE8sV3 VMSizeTypes = "Standard_E8s_v3" + // VMSizeTypesStandardE8V3 ... + VMSizeTypesStandardE8V3 VMSizeTypes = "Standard_E8_v3" + // VMSizeTypesStandardF1 ... + VMSizeTypesStandardF1 VMSizeTypes = "Standard_F1" + // VMSizeTypesStandardF16 ... + VMSizeTypesStandardF16 VMSizeTypes = "Standard_F16" + // VMSizeTypesStandardF16s ... + VMSizeTypesStandardF16s VMSizeTypes = "Standard_F16s" + // VMSizeTypesStandardF16sV2 ... + VMSizeTypesStandardF16sV2 VMSizeTypes = "Standard_F16s_v2" + // VMSizeTypesStandardF1s ... + VMSizeTypesStandardF1s VMSizeTypes = "Standard_F1s" + // VMSizeTypesStandardF2 ... + VMSizeTypesStandardF2 VMSizeTypes = "Standard_F2" + // VMSizeTypesStandardF2s ... + VMSizeTypesStandardF2s VMSizeTypes = "Standard_F2s" + // VMSizeTypesStandardF2sV2 ... + VMSizeTypesStandardF2sV2 VMSizeTypes = "Standard_F2s_v2" + // VMSizeTypesStandardF32sV2 ... + VMSizeTypesStandardF32sV2 VMSizeTypes = "Standard_F32s_v2" + // VMSizeTypesStandardF4 ... + VMSizeTypesStandardF4 VMSizeTypes = "Standard_F4" + // VMSizeTypesStandardF4s ... + VMSizeTypesStandardF4s VMSizeTypes = "Standard_F4s" + // VMSizeTypesStandardF4sV2 ... + VMSizeTypesStandardF4sV2 VMSizeTypes = "Standard_F4s_v2" + // VMSizeTypesStandardF64sV2 ... + VMSizeTypesStandardF64sV2 VMSizeTypes = "Standard_F64s_v2" + // VMSizeTypesStandardF72sV2 ... + VMSizeTypesStandardF72sV2 VMSizeTypes = "Standard_F72s_v2" + // VMSizeTypesStandardF8 ... + VMSizeTypesStandardF8 VMSizeTypes = "Standard_F8" + // VMSizeTypesStandardF8s ... + VMSizeTypesStandardF8s VMSizeTypes = "Standard_F8s" + // VMSizeTypesStandardF8sV2 ... + VMSizeTypesStandardF8sV2 VMSizeTypes = "Standard_F8s_v2" + // VMSizeTypesStandardG1 ... + VMSizeTypesStandardG1 VMSizeTypes = "Standard_G1" + // VMSizeTypesStandardG2 ... + VMSizeTypesStandardG2 VMSizeTypes = "Standard_G2" + // VMSizeTypesStandardG3 ... + VMSizeTypesStandardG3 VMSizeTypes = "Standard_G3" + // VMSizeTypesStandardG4 ... + VMSizeTypesStandardG4 VMSizeTypes = "Standard_G4" + // VMSizeTypesStandardG5 ... + VMSizeTypesStandardG5 VMSizeTypes = "Standard_G5" + // VMSizeTypesStandardGS1 ... + VMSizeTypesStandardGS1 VMSizeTypes = "Standard_GS1" + // VMSizeTypesStandardGS2 ... + VMSizeTypesStandardGS2 VMSizeTypes = "Standard_GS2" + // VMSizeTypesStandardGS3 ... + VMSizeTypesStandardGS3 VMSizeTypes = "Standard_GS3" + // VMSizeTypesStandardGS4 ... 
+ VMSizeTypesStandardGS4 VMSizeTypes = "Standard_GS4" + // VMSizeTypesStandardGS44 ... + VMSizeTypesStandardGS44 VMSizeTypes = "Standard_GS4-4" + // VMSizeTypesStandardGS48 ... + VMSizeTypesStandardGS48 VMSizeTypes = "Standard_GS4-8" + // VMSizeTypesStandardGS5 ... + VMSizeTypesStandardGS5 VMSizeTypes = "Standard_GS5" + // VMSizeTypesStandardGS516 ... + VMSizeTypesStandardGS516 VMSizeTypes = "Standard_GS5-16" + // VMSizeTypesStandardGS58 ... + VMSizeTypesStandardGS58 VMSizeTypes = "Standard_GS5-8" + // VMSizeTypesStandardH16 ... + VMSizeTypesStandardH16 VMSizeTypes = "Standard_H16" + // VMSizeTypesStandardH16m ... + VMSizeTypesStandardH16m VMSizeTypes = "Standard_H16m" + // VMSizeTypesStandardH16mr ... + VMSizeTypesStandardH16mr VMSizeTypes = "Standard_H16mr" + // VMSizeTypesStandardH16r ... + VMSizeTypesStandardH16r VMSizeTypes = "Standard_H16r" + // VMSizeTypesStandardH8 ... + VMSizeTypesStandardH8 VMSizeTypes = "Standard_H8" + // VMSizeTypesStandardH8m ... + VMSizeTypesStandardH8m VMSizeTypes = "Standard_H8m" + // VMSizeTypesStandardL16s ... + VMSizeTypesStandardL16s VMSizeTypes = "Standard_L16s" + // VMSizeTypesStandardL32s ... + VMSizeTypesStandardL32s VMSizeTypes = "Standard_L32s" + // VMSizeTypesStandardL4s ... + VMSizeTypesStandardL4s VMSizeTypes = "Standard_L4s" + // VMSizeTypesStandardL8s ... + VMSizeTypesStandardL8s VMSizeTypes = "Standard_L8s" + // VMSizeTypesStandardM12832ms ... + VMSizeTypesStandardM12832ms VMSizeTypes = "Standard_M128-32ms" + // VMSizeTypesStandardM12864ms ... + VMSizeTypesStandardM12864ms VMSizeTypes = "Standard_M128-64ms" + // VMSizeTypesStandardM128ms ... + VMSizeTypesStandardM128ms VMSizeTypes = "Standard_M128ms" + // VMSizeTypesStandardM128s ... + VMSizeTypesStandardM128s VMSizeTypes = "Standard_M128s" + // VMSizeTypesStandardM6416ms ... + VMSizeTypesStandardM6416ms VMSizeTypes = "Standard_M64-16ms" + // VMSizeTypesStandardM6432ms ... + VMSizeTypesStandardM6432ms VMSizeTypes = "Standard_M64-32ms" + // VMSizeTypesStandardM64ms ... + VMSizeTypesStandardM64ms VMSizeTypes = "Standard_M64ms" + // VMSizeTypesStandardM64s ... + VMSizeTypesStandardM64s VMSizeTypes = "Standard_M64s" + // VMSizeTypesStandardNC12 ... + VMSizeTypesStandardNC12 VMSizeTypes = "Standard_NC12" + // VMSizeTypesStandardNC12sV2 ... + VMSizeTypesStandardNC12sV2 VMSizeTypes = "Standard_NC12s_v2" + // VMSizeTypesStandardNC12sV3 ... + VMSizeTypesStandardNC12sV3 VMSizeTypes = "Standard_NC12s_v3" + // VMSizeTypesStandardNC24 ... + VMSizeTypesStandardNC24 VMSizeTypes = "Standard_NC24" + // VMSizeTypesStandardNC24r ... + VMSizeTypesStandardNC24r VMSizeTypes = "Standard_NC24r" + // VMSizeTypesStandardNC24rsV2 ... + VMSizeTypesStandardNC24rsV2 VMSizeTypes = "Standard_NC24rs_v2" + // VMSizeTypesStandardNC24rsV3 ... + VMSizeTypesStandardNC24rsV3 VMSizeTypes = "Standard_NC24rs_v3" + // VMSizeTypesStandardNC24sV2 ... + VMSizeTypesStandardNC24sV2 VMSizeTypes = "Standard_NC24s_v2" + // VMSizeTypesStandardNC24sV3 ... + VMSizeTypesStandardNC24sV3 VMSizeTypes = "Standard_NC24s_v3" + // VMSizeTypesStandardNC6 ... + VMSizeTypesStandardNC6 VMSizeTypes = "Standard_NC6" + // VMSizeTypesStandardNC6sV2 ... + VMSizeTypesStandardNC6sV2 VMSizeTypes = "Standard_NC6s_v2" + // VMSizeTypesStandardNC6sV3 ... + VMSizeTypesStandardNC6sV3 VMSizeTypes = "Standard_NC6s_v3" + // VMSizeTypesStandardND12s ... + VMSizeTypesStandardND12s VMSizeTypes = "Standard_ND12s" + // VMSizeTypesStandardND24rs ... + VMSizeTypesStandardND24rs VMSizeTypes = "Standard_ND24rs" + // VMSizeTypesStandardND24s ... 
+ VMSizeTypesStandardND24s VMSizeTypes = "Standard_ND24s" + // VMSizeTypesStandardND6s ... + VMSizeTypesStandardND6s VMSizeTypes = "Standard_ND6s" + // VMSizeTypesStandardNV12 ... + VMSizeTypesStandardNV12 VMSizeTypes = "Standard_NV12" + // VMSizeTypesStandardNV24 ... + VMSizeTypesStandardNV24 VMSizeTypes = "Standard_NV24" + // VMSizeTypesStandardNV6 ... + VMSizeTypesStandardNV6 VMSizeTypes = "Standard_NV6" +) + +// PossibleVMSizeTypesValues returns an array of possible values for the VMSizeTypes const type. +func PossibleVMSizeTypesValues() []VMSizeTypes { + return []VMSizeTypes{VMSizeTypesStandardA1, VMSizeTypesStandardA10, VMSizeTypesStandardA11, VMSizeTypesStandardA1V2, VMSizeTypesStandardA2, VMSizeTypesStandardA2mV2, VMSizeTypesStandardA2V2, VMSizeTypesStandardA3, VMSizeTypesStandardA4, VMSizeTypesStandardA4mV2, VMSizeTypesStandardA4V2, VMSizeTypesStandardA5, VMSizeTypesStandardA6, VMSizeTypesStandardA7, VMSizeTypesStandardA8, VMSizeTypesStandardA8mV2, VMSizeTypesStandardA8V2, VMSizeTypesStandardA9, VMSizeTypesStandardB2ms, VMSizeTypesStandardB2s, VMSizeTypesStandardB4ms, VMSizeTypesStandardB8ms, VMSizeTypesStandardD1, VMSizeTypesStandardD11, VMSizeTypesStandardD11V2, VMSizeTypesStandardD11V2Promo, VMSizeTypesStandardD12, VMSizeTypesStandardD12V2, VMSizeTypesStandardD12V2Promo, VMSizeTypesStandardD13, VMSizeTypesStandardD13V2, VMSizeTypesStandardD13V2Promo, VMSizeTypesStandardD14, VMSizeTypesStandardD14V2, VMSizeTypesStandardD14V2Promo, VMSizeTypesStandardD15V2, VMSizeTypesStandardD16sV3, VMSizeTypesStandardD16V3, VMSizeTypesStandardD1V2, VMSizeTypesStandardD2, VMSizeTypesStandardD2sV3, VMSizeTypesStandardD2V2, VMSizeTypesStandardD2V2Promo, VMSizeTypesStandardD2V3, VMSizeTypesStandardD3, VMSizeTypesStandardD32sV3, VMSizeTypesStandardD32V3, VMSizeTypesStandardD3V2, VMSizeTypesStandardD3V2Promo, VMSizeTypesStandardD4, VMSizeTypesStandardD4sV3, VMSizeTypesStandardD4V2, VMSizeTypesStandardD4V2Promo, VMSizeTypesStandardD4V3, VMSizeTypesStandardD5V2, VMSizeTypesStandardD5V2Promo, VMSizeTypesStandardD64sV3, VMSizeTypesStandardD64V3, VMSizeTypesStandardD8sV3, VMSizeTypesStandardD8V3, VMSizeTypesStandardDS1, VMSizeTypesStandardDS11, VMSizeTypesStandardDS11V2, VMSizeTypesStandardDS11V2Promo, VMSizeTypesStandardDS12, VMSizeTypesStandardDS12V2, VMSizeTypesStandardDS12V2Promo, VMSizeTypesStandardDS13, VMSizeTypesStandardDS132V2, VMSizeTypesStandardDS134V2, VMSizeTypesStandardDS13V2, VMSizeTypesStandardDS13V2Promo, VMSizeTypesStandardDS14, VMSizeTypesStandardDS144V2, VMSizeTypesStandardDS148V2, VMSizeTypesStandardDS14V2, VMSizeTypesStandardDS14V2Promo, VMSizeTypesStandardDS15V2, VMSizeTypesStandardDS1V2, VMSizeTypesStandardDS2, VMSizeTypesStandardDS2V2, VMSizeTypesStandardDS2V2Promo, VMSizeTypesStandardDS3, VMSizeTypesStandardDS3V2, VMSizeTypesStandardDS3V2Promo, VMSizeTypesStandardDS4, VMSizeTypesStandardDS4V2, VMSizeTypesStandardDS4V2Promo, VMSizeTypesStandardDS5V2, VMSizeTypesStandardDS5V2Promo, VMSizeTypesStandardE16sV3, VMSizeTypesStandardE16V3, VMSizeTypesStandardE2sV3, VMSizeTypesStandardE2V3, VMSizeTypesStandardE3216sV3, VMSizeTypesStandardE328sV3, VMSizeTypesStandardE32sV3, VMSizeTypesStandardE32V3, VMSizeTypesStandardE4sV3, VMSizeTypesStandardE4V3, VMSizeTypesStandardE6416sV3, VMSizeTypesStandardE6432sV3, VMSizeTypesStandardE64sV3, VMSizeTypesStandardE64V3, VMSizeTypesStandardE8sV3, VMSizeTypesStandardE8V3, VMSizeTypesStandardF1, VMSizeTypesStandardF16, VMSizeTypesStandardF16s, VMSizeTypesStandardF16sV2, VMSizeTypesStandardF1s, VMSizeTypesStandardF2, VMSizeTypesStandardF2s, 
VMSizeTypesStandardF2sV2, VMSizeTypesStandardF32sV2, VMSizeTypesStandardF4, VMSizeTypesStandardF4s, VMSizeTypesStandardF4sV2, VMSizeTypesStandardF64sV2, VMSizeTypesStandardF72sV2, VMSizeTypesStandardF8, VMSizeTypesStandardF8s, VMSizeTypesStandardF8sV2, VMSizeTypesStandardG1, VMSizeTypesStandardG2, VMSizeTypesStandardG3, VMSizeTypesStandardG4, VMSizeTypesStandardG5, VMSizeTypesStandardGS1, VMSizeTypesStandardGS2, VMSizeTypesStandardGS3, VMSizeTypesStandardGS4, VMSizeTypesStandardGS44, VMSizeTypesStandardGS48, VMSizeTypesStandardGS5, VMSizeTypesStandardGS516, VMSizeTypesStandardGS58, VMSizeTypesStandardH16, VMSizeTypesStandardH16m, VMSizeTypesStandardH16mr, VMSizeTypesStandardH16r, VMSizeTypesStandardH8, VMSizeTypesStandardH8m, VMSizeTypesStandardL16s, VMSizeTypesStandardL32s, VMSizeTypesStandardL4s, VMSizeTypesStandardL8s, VMSizeTypesStandardM12832ms, VMSizeTypesStandardM12864ms, VMSizeTypesStandardM128ms, VMSizeTypesStandardM128s, VMSizeTypesStandardM6416ms, VMSizeTypesStandardM6432ms, VMSizeTypesStandardM64ms, VMSizeTypesStandardM64s, VMSizeTypesStandardNC12, VMSizeTypesStandardNC12sV2, VMSizeTypesStandardNC12sV3, VMSizeTypesStandardNC24, VMSizeTypesStandardNC24r, VMSizeTypesStandardNC24rsV2, VMSizeTypesStandardNC24rsV3, VMSizeTypesStandardNC24sV2, VMSizeTypesStandardNC24sV3, VMSizeTypesStandardNC6, VMSizeTypesStandardNC6sV2, VMSizeTypesStandardNC6sV3, VMSizeTypesStandardND12s, VMSizeTypesStandardND24rs, VMSizeTypesStandardND24s, VMSizeTypesStandardND6s, VMSizeTypesStandardNV12, VMSizeTypesStandardNV24, VMSizeTypesStandardNV6} +} + +// AccessProfile profile for enabling a user to access a managed cluster. +type AccessProfile struct { + // KubeConfig - Base64-encoded Kubernetes configuration file. + KubeConfig *[]byte `json:"kubeConfig,omitempty"` +} + +// AgentPool agent Pool. +type AgentPool struct { + autorest.Response `json:"-"` + // ManagedClusterAgentPoolProfileProperties - Properties of an agent pool. + *ManagedClusterAgentPoolProfileProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AgentPool. +func (ap AgentPool) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ap.ManagedClusterAgentPoolProfileProperties != nil { + objectMap["properties"] = ap.ManagedClusterAgentPoolProfileProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AgentPool struct. 
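+// The custom marshaler above and this unmarshaler exist because the embedded
+// ManagedClusterAgentPoolProfileProperties travel under a "properties"
+// envelope on the wire, for example (illustrative JSON only):
+//
+//   {"id": "...", "name": "pool1", "type": "Microsoft.ContainerService/managedClusters/agentPools",
+//    "properties": {"count": 3, "vmSize": "Standard_DS2_v2", "osType": "Linux"}}
+//
+// so the properties are re-nested when marshaling and inflated back here.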
+func (ap *AgentPool) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var managedClusterAgentPoolProfileProperties ManagedClusterAgentPoolProfileProperties + err = json.Unmarshal(*v, &managedClusterAgentPoolProfileProperties) + if err != nil { + return err + } + ap.ManagedClusterAgentPoolProfileProperties = &managedClusterAgentPoolProfileProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ap.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ap.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ap.Type = &typeVar + } + } + } + + return nil +} + +// AgentPoolAvailableVersions the list of available versions for an agent pool. +type AgentPoolAvailableVersions struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Id of the agent pool available versions. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Name of the agent pool available versions. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Type of the agent pool available versions. + Type *string `json:"type,omitempty"` + // AgentPoolAvailableVersionsProperties - Properties of agent pool available versions. + *AgentPoolAvailableVersionsProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for AgentPoolAvailableVersions. +func (apav AgentPoolAvailableVersions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if apav.AgentPoolAvailableVersionsProperties != nil { + objectMap["properties"] = apav.AgentPoolAvailableVersionsProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AgentPoolAvailableVersions struct. +func (apav *AgentPoolAvailableVersions) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + apav.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + apav.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + apav.Type = &typeVar + } + case "properties": + if v != nil { + var agentPoolAvailableVersionsProperties AgentPoolAvailableVersionsProperties + err = json.Unmarshal(*v, &agentPoolAvailableVersionsProperties) + if err != nil { + return err + } + apav.AgentPoolAvailableVersionsProperties = &agentPoolAvailableVersionsProperties + } + } + } + + return nil +} + +// AgentPoolAvailableVersionsProperties the list of available agent pool versions. +type AgentPoolAvailableVersionsProperties struct { + // AgentPoolVersions - List of versions available for agent pool. + AgentPoolVersions *[]AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem `json:"agentPoolVersions,omitempty"` +} + +// AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem ... +type AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem struct { + // Default - Whether this version is the default agent pool version. 
+ Default *bool `json:"default,omitempty"` + // KubernetesVersion - Kubernetes version (major, minor, patch). + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // IsPreview - Whether Kubernetes version is currently in preview. + IsPreview *bool `json:"isPreview,omitempty"` +} + +// AgentPoolListResult the response from the List Agent Pools operation. +type AgentPoolListResult struct { + autorest.Response `json:"-"` + // Value - The list of agent pools. + Value *[]AgentPool `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of agent pool results. + NextLink *string `json:"nextLink,omitempty"` +} + +// AgentPoolListResultIterator provides access to a complete listing of AgentPool values. +type AgentPoolListResultIterator struct { + i int + page AgentPoolListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AgentPoolListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AgentPoolListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AgentPoolListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AgentPoolListResultIterator) Response() AgentPoolListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AgentPoolListResultIterator) Value() AgentPool { + if !iter.page.NotDone() { + return AgentPool{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AgentPoolListResultIterator type. +func NewAgentPoolListResultIterator(page AgentPoolListResultPage) AgentPoolListResultIterator { + return AgentPoolListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (aplr AgentPoolListResult) IsEmpty() bool { + return aplr.Value == nil || len(*aplr.Value) == 0 +} + +// agentPoolListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (aplr AgentPoolListResult) agentPoolListResultPreparer(ctx context.Context) (*http.Request, error) { + if aplr.NextLink == nil || len(to.String(aplr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(aplr.NextLink))) +} + +// AgentPoolListResultPage contains a page of AgentPool values. 
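+// A hedged iteration sketch; it assumes AgentPoolsClient.List returns this
+// page type, as with other generated list operations:
+//
+//   page, err := agentPoolsClient.List(ctx, "rg1", "aks1")
+//   for err == nil && page.NotDone() {
+//       for _, ap := range page.Values() {
+//           fmt.Println(to.String(ap.Name))
+//       }
+//       err = page.NextWithContext(ctx)
+//   }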
+type AgentPoolListResultPage struct { + fn func(context.Context, AgentPoolListResult) (AgentPoolListResult, error) + aplr AgentPoolListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AgentPoolListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.aplr) + if err != nil { + return err + } + page.aplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AgentPoolListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AgentPoolListResultPage) NotDone() bool { + return !page.aplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AgentPoolListResultPage) Response() AgentPoolListResult { + return page.aplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AgentPoolListResultPage) Values() []AgentPool { + if page.aplr.IsEmpty() { + return nil + } + return *page.aplr.Value +} + +// Creates a new instance of the AgentPoolListResultPage type. +func NewAgentPoolListResultPage(getNextPage func(context.Context, AgentPoolListResult) (AgentPoolListResult, error)) AgentPoolListResultPage { + return AgentPoolListResultPage{fn: getNextPage} +} + +// AgentPoolProfile profile for the container service agent pool. +type AgentPoolProfile struct { + // Name - Unique name of the agent pool profile in the context of the subscription and resource group. + Name *string `json:"name,omitempty"` + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. 
Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 
'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' + VMSize VMSizeTypes `json:"vmSize,omitempty"` + // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. + OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for the agent pool. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - READ-ONLY; FQDN for the agent pool. + Fqdn *string `json:"fqdn,omitempty"` + // Ports - Ports number array used to expose on this agent pool. The default opened ports are different based on your choice of orchestrator. + Ports *[]int32 `json:"ports,omitempty"` + // StorageProfile - Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice. Possible values include: 'StorageAccount', 'ManagedDisks' + StorageProfile StorageProfileTypes `json:"storageProfile,omitempty"` + // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier. + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` +} + +// AgentPoolsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type AgentPoolsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
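The AgentPoolListResultPage defined above follows the SDK's usual pager contract: NotDone reports whether a page is available, Values exposes the current page, and NextWithContext fetches the following one. A minimal consumption sketch, assuming the pager was already obtained from a list operation on AgentPoolsClient (not part of this file):

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// allAgentPools drains every AgentPool from an already-initialized pager.
// The pager is assumed to come from a List call made elsewhere.
func allAgentPools(ctx context.Context, page containerservice.AgentPoolListResultPage) ([]containerservice.AgentPool, error) {
	var pools []containerservice.AgentPool
	for page.NotDone() {
		pools = append(pools, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return pools, nil
}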
+func (future *AgentPoolsCreateOrUpdateFuture) Result(client AgentPoolsClient) (ap AgentPool, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ap.Response.Response, err = future.GetResult(sender); err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { + ap, err = client.CreateOrUpdateResponder(ap.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsCreateOrUpdateFuture", "Result", ap.Response.Response, "Failure responding to request") + } + } + return +} + +// AgentPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type AgentPoolsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *AgentPoolsDeleteFuture) Result(client AgentPoolsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// AgentPoolUpgradeProfile the list of available upgrades for an agent pool. +type AgentPoolUpgradeProfile struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Id of the agent pool upgrade profile. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Name of the agent pool upgrade profile. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Type of the agent pool upgrade profile. + Type *string `json:"type,omitempty"` + // AgentPoolUpgradeProfileProperties - Properties of agent pool upgrade profile. + *AgentPoolUpgradeProfileProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for AgentPoolUpgradeProfile. +func (apup AgentPoolUpgradeProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if apup.AgentPoolUpgradeProfileProperties != nil { + objectMap["properties"] = apup.AgentPoolUpgradeProfileProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AgentPoolUpgradeProfile struct. 
+func (apup *AgentPoolUpgradeProfile) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + apup.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + apup.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + apup.Type = &typeVar + } + case "properties": + if v != nil { + var agentPoolUpgradeProfileProperties AgentPoolUpgradeProfileProperties + err = json.Unmarshal(*v, &agentPoolUpgradeProfileProperties) + if err != nil { + return err + } + apup.AgentPoolUpgradeProfileProperties = &agentPoolUpgradeProfileProperties + } + } + } + + return nil +} + +// AgentPoolUpgradeProfileProperties the list of available upgrade versions. +type AgentPoolUpgradeProfileProperties struct { + // KubernetesVersion - Kubernetes version (major, minor, patch). + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` + // Upgrades - List of orchestrator types and versions available for upgrade. + Upgrades *[]AgentPoolUpgradeProfilePropertiesUpgradesItem `json:"upgrades,omitempty"` +} + +// AgentPoolUpgradeProfilePropertiesUpgradesItem ... +type AgentPoolUpgradeProfilePropertiesUpgradesItem struct { + // KubernetesVersion - Kubernetes version (major, minor, patch). + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // IsPreview - Whether Kubernetes version is currently in preview. + IsPreview *bool `json:"isPreview,omitempty"` +} + +// CloudError an error response from the Container service. +type CloudError struct { + // Error - Details about the error. + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody an error response from the Container service. +type CloudErrorBody struct { + // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + Code *string `json:"code,omitempty"` + // Message - A message describing the error, intended to be suitable for display in a user interface. + Message *string `json:"message,omitempty"` + // Target - The target of the particular error. For example, the name of the property in error. + Target *string `json:"target,omitempty"` + // Details - A list of additional details about the error. + Details *[]CloudErrorBody `json:"details,omitempty"` +} + +// ContainerService container service. +type ContainerService struct { + autorest.Response `json:"-"` + // Properties - Properties of the container service. + *Properties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ContainerService. 
+func (cs ContainerService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cs.Properties != nil { + objectMap["properties"] = cs.Properties + } + if cs.Location != nil { + objectMap["location"] = cs.Location + } + if cs.Tags != nil { + objectMap["tags"] = cs.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ContainerService struct. +func (cs *ContainerService) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var properties Properties + err = json.Unmarshal(*v, &properties) + if err != nil { + return err + } + cs.Properties = &properties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + cs.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + cs.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + cs.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + cs.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + cs.Tags = tags + } + } + } + + return nil +} + +// ContainerServicesCreateOrUpdateFutureType an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ContainerServicesCreateOrUpdateFutureType struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ContainerServicesCreateOrUpdateFutureType) Result(client ContainerServicesClient) (cs ContainerService, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesCreateOrUpdateFutureType", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ContainerServicesCreateOrUpdateFutureType") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.CreateOrUpdateResponder(cs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesCreateOrUpdateFutureType", "Result", cs.Response.Response, "Failure responding to request") + } + } + return +} + +// ContainerServicesDeleteFutureType an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ContainerServicesDeleteFutureType struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
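The custom marshalers above flatten the embedded *Properties struct into the ARM "properties" envelope and keep READ-ONLY fields (ID, Name, Type) out of request bodies. A small round-trip sketch:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

// roundTrip shows that only writable fields are serialized; READ-ONLY
// fields such as ID, Name and Type never appear in the output.
func roundTrip() error {
	cs := containerservice.ContainerService{
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"env": to.StringPtr("dev")},
	}
	b, err := json.Marshal(cs)
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // {"location":"eastus","tags":{"env":"dev"}}

	var out containerservice.ContainerService
	return json.Unmarshal(b, &out)
}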
+func (future *ContainerServicesDeleteFutureType) Result(client ContainerServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesDeleteFutureType", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ContainerServicesDeleteFutureType") + return + } + ar.Response = future.Response() + return +} + +// CredentialResult the credential result response. +type CredentialResult struct { + // Name - READ-ONLY; The name of the credential. + Name *string `json:"name,omitempty"` + // Value - READ-ONLY; Base64-encoded Kubernetes configuration file. + Value *[]byte `json:"value,omitempty"` +} + +// CredentialResults the list of credential result response. +type CredentialResults struct { + autorest.Response `json:"-"` + // Kubeconfigs - READ-ONLY; Base64-encoded Kubernetes configuration file. + Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"` +} + +// CustomProfile properties to configure a custom container service cluster. +type CustomProfile struct { + // Orchestrator - The name of the custom orchestrator to use. + Orchestrator *string `json:"orchestrator,omitempty"` +} + +// DiagnosticsProfile profile for diagnostics on the container service cluster. +type DiagnosticsProfile struct { + // VMDiagnostics - Profile for diagnostics on the container service VMs. + VMDiagnostics *VMDiagnostics `json:"vmDiagnostics,omitempty"` +} + +// KeyVaultSecretRef reference to a secret stored in Azure Key Vault. +type KeyVaultSecretRef struct { + // VaultID - Key vault identifier. + VaultID *string `json:"vaultID,omitempty"` + // SecretName - The secret name. + SecretName *string `json:"secretName,omitempty"` + // Version - The secret version. + Version *string `json:"version,omitempty"` +} + +// LinuxProfile profile for Linux VMs in the container service cluster. +type LinuxProfile struct { + // AdminUsername - The administrator username to use for Linux VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // SSH - SSH configuration for Linux-based VMs running on Azure. + SSH *SSHConfiguration `json:"ssh,omitempty"` +} + +// ListResult the response from the List Container Services operation. +type ListResult struct { + autorest.Response `json:"-"` + // Value - The list of container services. + Value *[]ContainerService `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of container service results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ListResultIterator provides access to a complete listing of ContainerService values. +type ListResultIterator struct { + i int + page ListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListResultIterator) Response() ListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListResultIterator) Value() ContainerService { + if !iter.page.NotDone() { + return ContainerService{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ListResultIterator type. +func NewListResultIterator(page ListResultPage) ListResultIterator { + return ListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lr ListResult) IsEmpty() bool { + return lr.Value == nil || len(*lr.Value) == 0 +} + +// listResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) { + if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lr.NextLink))) +} + +// ListResultPage contains a page of ContainerService values. +type ListResultPage struct { + fn func(context.Context, ListResult) (ListResult, error) + lr ListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lr) + if err != nil { + return err + } + page.lr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
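ListResultIterator wraps a ListResultPage and yields one ContainerService at a time; NotDone, Value and NextWithContext drive the walk. A sketch, assuming the initial page came from a list call made elsewhere:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
)

// printNames walks a page-backed iterator one ContainerService at a time.
func printNames(ctx context.Context, page containerservice.ListResultPage) error {
	iter := containerservice.NewListResultIterator(page)
	for iter.NotDone() {
		if cs := iter.Value(); cs.Name != nil {
			fmt.Println(*cs.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}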
+func (page ListResultPage) NotDone() bool { + return !page.lr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListResultPage) Response() ListResult { + return page.lr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListResultPage) Values() []ContainerService { + if page.lr.IsEmpty() { + return nil + } + return *page.lr.Value +} + +// Creates a new instance of the ListResultPage type. +func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage { + return ListResultPage{fn: getNextPage} +} + +// ManagedCluster managed cluster. +type ManagedCluster struct { + autorest.Response `json:"-"` + // ManagedClusterProperties - Properties of a managed cluster. + *ManagedClusterProperties `json:"properties,omitempty"` + // Identity - The identity of the managed cluster, if configured. + Identity *ManagedClusterIdentity `json:"identity,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ManagedCluster. +func (mc ManagedCluster) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mc.ManagedClusterProperties != nil { + objectMap["properties"] = mc.ManagedClusterProperties + } + if mc.Identity != nil { + objectMap["identity"] = mc.Identity + } + if mc.Location != nil { + objectMap["location"] = mc.Location + } + if mc.Tags != nil { + objectMap["tags"] = mc.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ManagedCluster struct. +func (mc *ManagedCluster) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var managedClusterProperties ManagedClusterProperties + err = json.Unmarshal(*v, &managedClusterProperties) + if err != nil { + return err + } + mc.ManagedClusterProperties = &managedClusterProperties + } + case "identity": + if v != nil { + var identity ManagedClusterIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + mc.Identity = &identity + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + mc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + mc.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + mc.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + mc.Tags = tags + } + } + } + + return nil +} + +// ManagedClusterAADProfile aADProfile specifies attributes for Azure Active Directory integration. +type ManagedClusterAADProfile struct { + // ClientAppID - The client AAD application ID. 
+ ClientAppID *string `json:"clientAppID,omitempty"` + // ServerAppID - The server AAD application ID. + ServerAppID *string `json:"serverAppID,omitempty"` + // ServerAppSecret - The server AAD application secret. + ServerAppSecret *string `json:"serverAppSecret,omitempty"` + // TenantID - The AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription. + TenantID *string `json:"tenantID,omitempty"` +} + +// ManagedClusterAccessProfile managed cluster Access Profile. +type ManagedClusterAccessProfile struct { + autorest.Response `json:"-"` + // AccessProfile - AccessProfile of a managed cluster. + *AccessProfile `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ManagedClusterAccessProfile. +func (mcap ManagedClusterAccessProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mcap.AccessProfile != nil { + objectMap["properties"] = mcap.AccessProfile + } + if mcap.Location != nil { + objectMap["location"] = mcap.Location + } + if mcap.Tags != nil { + objectMap["tags"] = mcap.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ManagedClusterAccessProfile struct. +func (mcap *ManagedClusterAccessProfile) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var accessProfile AccessProfile + err = json.Unmarshal(*v, &accessProfile) + if err != nil { + return err + } + mcap.AccessProfile = &accessProfile + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + mcap.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mcap.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + mcap.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + mcap.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + mcap.Tags = tags + } + } + } + + return nil +} + +// ManagedClusterAddonProfile a Kubernetes add-on profile for a managed cluster. +type ManagedClusterAddonProfile struct { + // Enabled - Whether the add-on is enabled or not. + Enabled *bool `json:"enabled,omitempty"` + // Config - Key-value pairs for configuring an add-on. + Config map[string]*string `json:"config"` + // Identity - READ-ONLY; Information of user assigned identity used by this add-on. + Identity *ManagedClusterAddonProfileIdentity `json:"identity,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagedClusterAddonProfile. 
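ManagedClusterAddonProfile entries are keyed by add-on name inside ManagedClusterProperties.AddonProfiles; Enabled and Config are writable, while Identity is READ-ONLY and populated by the service. A sketch with placeholder add-on and config names (they are not defined by this package):

package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleAddonProfiles builds the addonProfiles map used on
// ManagedClusterProperties. "someAddon" and "someSetting" are
// illustrative placeholders, not values defined by this package.
func exampleAddonProfiles() map[string]*containerservice.ManagedClusterAddonProfile {
	return map[string]*containerservice.ManagedClusterAddonProfile{
		"someAddon": {
			Enabled: to.BoolPtr(true),
			Config: map[string]*string{
				"someSetting": to.StringPtr("some-value"),
			},
		},
	}
}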
+func (mcap ManagedClusterAddonProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mcap.Enabled != nil { + objectMap["enabled"] = mcap.Enabled + } + if mcap.Config != nil { + objectMap["config"] = mcap.Config + } + return json.Marshal(objectMap) +} + +// ManagedClusterAddonProfileIdentity information of user assigned identity used by this add-on. +type ManagedClusterAddonProfileIdentity struct { + // ResourceID - The resource id of the user assigned identity. + ResourceID *string `json:"resourceId,omitempty"` + // ClientID - The client id of the user assigned identity. + ClientID *string `json:"clientId,omitempty"` + // ObjectID - The object id of the user assigned identity. + ObjectID *string `json:"objectId,omitempty"` +} + +// ManagedClusterAgentPoolProfile profile for the container service agent pool. +type ManagedClusterAgentPoolProfile struct { + // Name - Unique name of the agent pool profile in the context of the subscription and resource group. + Name *string `json:"name,omitempty"` + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 
'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' + VMSize VMSizeTypes `json:"vmSize,omitempty"` + // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. + OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier. + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + // MaxPods - Maximum number of pods that can run on a node. + MaxPods *int32 `json:"maxPods,omitempty"` + // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. 
Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` + // MaxCount - Maximum number of nodes for auto-scaling + MaxCount *int32 `json:"maxCount,omitempty"` + // MinCount - Minimum number of nodes for auto-scaling + MinCount *int32 `json:"minCount,omitempty"` + // EnableAutoScaling - Whether to enable auto-scaler + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + // Type - AgentPoolType represents types of an agent pool. Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet' + Type AgentPoolType `json:"type,omitempty"` + // OrchestratorVersion - Version of orchestrator specified when creating the managed cluster. + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // AvailabilityZones - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + // EnableNodePublicIP - Enable public IP for nodes + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + // ScaleSetPriority - ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular. Possible values include: 'Low', 'Regular' + ScaleSetPriority ScaleSetPriority `json:"scaleSetPriority,omitempty"` + // ScaleSetEvictionPolicy - ScaleSetEvictionPolicy to be used to specify eviction policy for low priority virtual machine scale set. Default to Delete. Possible values include: 'Delete', 'Deallocate' + ScaleSetEvictionPolicy ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` + // Tags - Agent pool tags to be persisted on the agent pool virtual machine scale set. + Tags map[string]*string `json:"tags"` + // NodeLabels - Agent pool node labels to be persisted across all nodes in agent pool. + NodeLabels map[string]*string `json:"nodeLabels"` + // NodeTaints - Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule. + NodeTaints *[]string `json:"nodeTaints,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagedClusterAgentPoolProfile. 
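A sketch of populating ManagedClusterAgentPoolProfile with the enum constants this package exposes; the pool name, size, and scaling bounds are illustrative only:

package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleAgentPool sketches a single auto-scaling agent pool profile.
func exampleAgentPool() containerservice.ManagedClusterAgentPoolProfile {
	return containerservice.ManagedClusterAgentPoolProfile{
		Name:              to.StringPtr("nodepool1"),
		Count:             to.Int32Ptr(3),
		VMSize:            containerservice.VMSizeTypesStandardDS2V2,
		OsType:            containerservice.Linux,
		Type:              containerservice.VirtualMachineScaleSets,
		EnableAutoScaling: to.BoolPtr(true),
		MinCount:          to.Int32Ptr(1),
		MaxCount:          to.Int32Ptr(5),
	}
}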
+func (mcapp ManagedClusterAgentPoolProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mcapp.Name != nil { + objectMap["name"] = mcapp.Name + } + if mcapp.Count != nil { + objectMap["count"] = mcapp.Count + } + if mcapp.VMSize != "" { + objectMap["vmSize"] = mcapp.VMSize + } + if mcapp.OsDiskSizeGB != nil { + objectMap["osDiskSizeGB"] = mcapp.OsDiskSizeGB + } + if mcapp.VnetSubnetID != nil { + objectMap["vnetSubnetID"] = mcapp.VnetSubnetID + } + if mcapp.MaxPods != nil { + objectMap["maxPods"] = mcapp.MaxPods + } + if mcapp.OsType != "" { + objectMap["osType"] = mcapp.OsType + } + if mcapp.MaxCount != nil { + objectMap["maxCount"] = mcapp.MaxCount + } + if mcapp.MinCount != nil { + objectMap["minCount"] = mcapp.MinCount + } + if mcapp.EnableAutoScaling != nil { + objectMap["enableAutoScaling"] = mcapp.EnableAutoScaling + } + if mcapp.Type != "" { + objectMap["type"] = mcapp.Type + } + if mcapp.OrchestratorVersion != nil { + objectMap["orchestratorVersion"] = mcapp.OrchestratorVersion + } + if mcapp.AvailabilityZones != nil { + objectMap["availabilityZones"] = mcapp.AvailabilityZones + } + if mcapp.EnableNodePublicIP != nil { + objectMap["enableNodePublicIP"] = mcapp.EnableNodePublicIP + } + if mcapp.ScaleSetPriority != "" { + objectMap["scaleSetPriority"] = mcapp.ScaleSetPriority + } + if mcapp.ScaleSetEvictionPolicy != "" { + objectMap["scaleSetEvictionPolicy"] = mcapp.ScaleSetEvictionPolicy + } + if mcapp.Tags != nil { + objectMap["tags"] = mcapp.Tags + } + if mcapp.NodeLabels != nil { + objectMap["nodeLabels"] = mcapp.NodeLabels + } + if mcapp.NodeTaints != nil { + objectMap["nodeTaints"] = mcapp.NodeTaints + } + return json.Marshal(objectMap) +} + +// ManagedClusterAgentPoolProfileProperties properties for the container service agent pool profile. +type ManagedClusterAgentPoolProfileProperties struct { + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. 
Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 
'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' + VMSize VMSizeTypes `json:"vmSize,omitempty"` + // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. + OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier. + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + // MaxPods - Maximum number of pods that can run on a node. + MaxPods *int32 `json:"maxPods,omitempty"` + // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` + // MaxCount - Maximum number of nodes for auto-scaling + MaxCount *int32 `json:"maxCount,omitempty"` + // MinCount - Minimum number of nodes for auto-scaling + MinCount *int32 `json:"minCount,omitempty"` + // EnableAutoScaling - Whether to enable auto-scaler + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + // Type - AgentPoolType represents types of an agent pool. Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet' + Type AgentPoolType `json:"type,omitempty"` + // OrchestratorVersion - Version of orchestrator specified when creating the managed cluster. + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // AvailabilityZones - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + // EnableNodePublicIP - Enable public IP for nodes + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + // ScaleSetPriority - ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular. 
Possible values include: 'Low', 'Regular' + ScaleSetPriority ScaleSetPriority `json:"scaleSetPriority,omitempty"` + // ScaleSetEvictionPolicy - ScaleSetEvictionPolicy to be used to specify eviction policy for low priority virtual machine scale set. Default to Delete. Possible values include: 'Delete', 'Deallocate' + ScaleSetEvictionPolicy ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` + // Tags - Agent pool tags to be persisted on the agent pool virtual machine scale set. + Tags map[string]*string `json:"tags"` + // NodeLabels - Agent pool node labels to be persisted across all nodes in agent pool. + NodeLabels map[string]*string `json:"nodeLabels"` + // NodeTaints - Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule. + NodeTaints *[]string `json:"nodeTaints,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagedClusterAgentPoolProfileProperties. +func (mcappp ManagedClusterAgentPoolProfileProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mcappp.Count != nil { + objectMap["count"] = mcappp.Count + } + if mcappp.VMSize != "" { + objectMap["vmSize"] = mcappp.VMSize + } + if mcappp.OsDiskSizeGB != nil { + objectMap["osDiskSizeGB"] = mcappp.OsDiskSizeGB + } + if mcappp.VnetSubnetID != nil { + objectMap["vnetSubnetID"] = mcappp.VnetSubnetID + } + if mcappp.MaxPods != nil { + objectMap["maxPods"] = mcappp.MaxPods + } + if mcappp.OsType != "" { + objectMap["osType"] = mcappp.OsType + } + if mcappp.MaxCount != nil { + objectMap["maxCount"] = mcappp.MaxCount + } + if mcappp.MinCount != nil { + objectMap["minCount"] = mcappp.MinCount + } + if mcappp.EnableAutoScaling != nil { + objectMap["enableAutoScaling"] = mcappp.EnableAutoScaling + } + if mcappp.Type != "" { + objectMap["type"] = mcappp.Type + } + if mcappp.OrchestratorVersion != nil { + objectMap["orchestratorVersion"] = mcappp.OrchestratorVersion + } + if mcappp.AvailabilityZones != nil { + objectMap["availabilityZones"] = mcappp.AvailabilityZones + } + if mcappp.EnableNodePublicIP != nil { + objectMap["enableNodePublicIP"] = mcappp.EnableNodePublicIP + } + if mcappp.ScaleSetPriority != "" { + objectMap["scaleSetPriority"] = mcappp.ScaleSetPriority + } + if mcappp.ScaleSetEvictionPolicy != "" { + objectMap["scaleSetEvictionPolicy"] = mcappp.ScaleSetEvictionPolicy + } + if mcappp.Tags != nil { + objectMap["tags"] = mcappp.Tags + } + if mcappp.NodeLabels != nil { + objectMap["nodeLabels"] = mcappp.NodeLabels + } + if mcappp.NodeTaints != nil { + objectMap["nodeTaints"] = mcappp.NodeTaints + } + return json.Marshal(objectMap) +} + +// ManagedClusterAPIServerAccessProfile access profile for managed cluster API server. +type ManagedClusterAPIServerAccessProfile struct { + // AuthorizedIPRanges - Authorized IP Ranges to kubernetes API server. + AuthorizedIPRanges *[]string `json:"authorizedIPRanges,omitempty"` + // EnablePrivateCluster - Whether to create the cluster as a private cluster or not. + EnablePrivateCluster *bool `json:"enablePrivateCluster,omitempty"` +} + +// ManagedClusterIdentity identity for the managed cluster. +type ManagedClusterIdentity struct { + // PrincipalID - READ-ONLY; The principal id of the system assigned identity which is used by master components. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant id of the system assigned identity which is used by master components. 
+ TenantID *string `json:"tenantId,omitempty"` + // Type - The type of identity used for the managed cluster. Type 'SystemAssigned' will use an implicitly created identity in master components and an auto-created user assigned identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster, service principal will be used instead. Possible values include: 'SystemAssigned', 'None' + Type ResourceIdentityType `json:"type,omitempty"` +} + +// ManagedClusterListResult the response from the List Managed Clusters operation. +type ManagedClusterListResult struct { + autorest.Response `json:"-"` + // Value - The list of managed clusters. + Value *[]ManagedCluster `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of managed cluster results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ManagedClusterListResultIterator provides access to a complete listing of ManagedCluster values. +type ManagedClusterListResultIterator struct { + i int + page ManagedClusterListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ManagedClusterListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ManagedClusterListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ManagedClusterListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ManagedClusterListResultIterator) Response() ManagedClusterListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ManagedClusterListResultIterator) Value() ManagedCluster { + if !iter.page.NotDone() { + return ManagedCluster{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ManagedClusterListResultIterator type. +func NewManagedClusterListResultIterator(page ManagedClusterListResultPage) ManagedClusterListResultIterator { + return ManagedClusterListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (mclr ManagedClusterListResult) IsEmpty() bool { + return mclr.Value == nil || len(*mclr.Value) == 0 +} + +// managedClusterListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (mclr ManagedClusterListResult) managedClusterListResultPreparer(ctx context.Context) (*http.Request, error) { + if mclr.NextLink == nil || len(to.String(mclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(mclr.NextLink))) +} + +// ManagedClusterListResultPage contains a page of ManagedCluster values. +type ManagedClusterListResultPage struct { + fn func(context.Context, ManagedClusterListResult) (ManagedClusterListResult, error) + mclr ManagedClusterListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ManagedClusterListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.mclr) + if err != nil { + return err + } + page.mclr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ManagedClusterListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ManagedClusterListResultPage) NotDone() bool { + return !page.mclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ManagedClusterListResultPage) Response() ManagedClusterListResult { + return page.mclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ManagedClusterListResultPage) Values() []ManagedCluster { + if page.mclr.IsEmpty() { + return nil + } + return *page.mclr.Value +} + +// Creates a new instance of the ManagedClusterListResultPage type. +func NewManagedClusterListResultPage(getNextPage func(context.Context, ManagedClusterListResult) (ManagedClusterListResult, error)) ManagedClusterListResultPage { + return ManagedClusterListResultPage{fn: getNextPage} +} + +// ManagedClusterLoadBalancerProfile profile of the managed cluster load balancer. +type ManagedClusterLoadBalancerProfile struct { + // ManagedOutboundIPs - Desired managed outbound IPs for the cluster load balancer. + ManagedOutboundIPs *ManagedClusterLoadBalancerProfileManagedOutboundIPs `json:"managedOutboundIPs,omitempty"` + // OutboundIPPrefixes - Desired outbound IP Prefix resources for the cluster load balancer. + OutboundIPPrefixes *ManagedClusterLoadBalancerProfileOutboundIPPrefixes `json:"outboundIPPrefixes,omitempty"` + // OutboundIPs - Desired outbound IP resources for the cluster load balancer. + OutboundIPs *ManagedClusterLoadBalancerProfileOutboundIPs `json:"outboundIPs,omitempty"` + // EffectiveOutboundIPs - The effective outbound IP resources of the cluster load balancer. + EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` + // AllocatedOutboundPorts - Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports. 
+ AllocatedOutboundPorts *int32 `json:"allocatedOutboundPorts,omitempty"` + // IdleTimeoutInMinutes - Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` +} + +// ManagedClusterLoadBalancerProfileManagedOutboundIPs desired managed outbound IPs for the cluster load +// balancer. +type ManagedClusterLoadBalancerProfileManagedOutboundIPs struct { + // Count - Desired number of outbound IP created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` +} + +// ManagedClusterLoadBalancerProfileOutboundIPPrefixes desired outbound IP Prefix resources for the cluster +// load balancer. +type ManagedClusterLoadBalancerProfileOutboundIPPrefixes struct { + // PublicIPPrefixes - A list of public IP prefix resources. + PublicIPPrefixes *[]ResourceReference `json:"publicIPPrefixes,omitempty"` +} + +// ManagedClusterLoadBalancerProfileOutboundIPs desired outbound IP resources for the cluster load +// balancer. +type ManagedClusterLoadBalancerProfileOutboundIPs struct { + // PublicIPs - A list of public IP resources. + PublicIPs *[]ResourceReference `json:"publicIPs,omitempty"` +} + +// ManagedClusterPoolUpgradeProfile the list of available upgrade versions. +type ManagedClusterPoolUpgradeProfile struct { + // KubernetesVersion - Kubernetes version (major, minor, patch). + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // Name - Pool name. + Name *string `json:"name,omitempty"` + // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` + // Upgrades - List of orchestrator types and versions available for upgrade. + Upgrades *[]ManagedClusterPoolUpgradeProfileUpgradesItem `json:"upgrades,omitempty"` +} + +// ManagedClusterPoolUpgradeProfileUpgradesItem ... +type ManagedClusterPoolUpgradeProfileUpgradesItem struct { + // KubernetesVersion - Kubernetes version (major, minor, patch). + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // IsPreview - Whether Kubernetes version is currently in preview. + IsPreview *bool `json:"isPreview,omitempty"` +} + +// ManagedClusterProperties properties of the managed cluster. +type ManagedClusterProperties struct { + // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // MaxAgentPools - READ-ONLY; The max number of agent pools for the managed cluster. + MaxAgentPools *int32 `json:"maxAgentPools,omitempty"` + // KubernetesVersion - Version of Kubernetes specified when creating the managed cluster. + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // DNSPrefix - DNS prefix specified when creating the managed cluster. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - READ-ONLY; FQDN for the master pool. + Fqdn *string `json:"fqdn,omitempty"` + // PrivateFQDN - READ-ONLY; FQDN of private cluster. + PrivateFQDN *string `json:"privateFQDN,omitempty"` + // AgentPoolProfiles - Properties of the agent pool. + AgentPoolProfiles *[]ManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + // LinuxProfile - Profile for Linux VMs in the container service cluster. 
+ LinuxProfile *LinuxProfile `json:"linuxProfile,omitempty"` + // WindowsProfile - Profile for Windows VMs in the container service cluster. + WindowsProfile *ManagedClusterWindowsProfile `json:"windowsProfile,omitempty"` + // ServicePrincipalProfile - Information about a service principal identity for the cluster to use for manipulating Azure APIs. + ServicePrincipalProfile *ManagedClusterServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + // AddonProfiles - Profile of managed cluster add-on. + AddonProfiles map[string]*ManagedClusterAddonProfile `json:"addonProfiles"` + // NodeResourceGroup - Name of the resource group containing agent pool nodes. + NodeResourceGroup *string `json:"nodeResourceGroup,omitempty"` + // EnableRBAC - Whether to enable Kubernetes Role-Based Access Control. + EnableRBAC *bool `json:"enableRBAC,omitempty"` + // EnablePodSecurityPolicy - (PREVIEW) Whether to enable Kubernetes Pod security policy. + EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty"` + // NetworkProfile - Profile of network configuration. + NetworkProfile *NetworkProfileType `json:"networkProfile,omitempty"` + // AadProfile - Profile of Azure Active Directory configuration. + AadProfile *ManagedClusterAADProfile `json:"aadProfile,omitempty"` + // APIServerAccessProfile - Access profile for managed cluster API server. + APIServerAccessProfile *ManagedClusterAPIServerAccessProfile `json:"apiServerAccessProfile,omitempty"` + // DiskEncryptionSetID - ResourceId of the disk encryption set to use for enabling encryption at rest. + DiskEncryptionSetID *string `json:"diskEncryptionSetID,omitempty"` + // IdentityProfile - Identities associated with the cluster. + IdentityProfile map[string]*ManagedClusterPropertiesIdentityProfileValue `json:"identityProfile"` +} + +// MarshalJSON is the custom marshaler for ManagedClusterProperties. +func (mcp ManagedClusterProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mcp.KubernetesVersion != nil { + objectMap["kubernetesVersion"] = mcp.KubernetesVersion + } + if mcp.DNSPrefix != nil { + objectMap["dnsPrefix"] = mcp.DNSPrefix + } + if mcp.AgentPoolProfiles != nil { + objectMap["agentPoolProfiles"] = mcp.AgentPoolProfiles + } + if mcp.LinuxProfile != nil { + objectMap["linuxProfile"] = mcp.LinuxProfile + } + if mcp.WindowsProfile != nil { + objectMap["windowsProfile"] = mcp.WindowsProfile + } + if mcp.ServicePrincipalProfile != nil { + objectMap["servicePrincipalProfile"] = mcp.ServicePrincipalProfile + } + if mcp.AddonProfiles != nil { + objectMap["addonProfiles"] = mcp.AddonProfiles + } + if mcp.NodeResourceGroup != nil { + objectMap["nodeResourceGroup"] = mcp.NodeResourceGroup + } + if mcp.EnableRBAC != nil { + objectMap["enableRBAC"] = mcp.EnableRBAC + } + if mcp.EnablePodSecurityPolicy != nil { + objectMap["enablePodSecurityPolicy"] = mcp.EnablePodSecurityPolicy + } + if mcp.NetworkProfile != nil { + objectMap["networkProfile"] = mcp.NetworkProfile + } + if mcp.AadProfile != nil { + objectMap["aadProfile"] = mcp.AadProfile + } + if mcp.APIServerAccessProfile != nil { + objectMap["apiServerAccessProfile"] = mcp.APIServerAccessProfile + } + if mcp.DiskEncryptionSetID != nil { + objectMap["diskEncryptionSetID"] = mcp.DiskEncryptionSetID + } + if mcp.IdentityProfile != nil { + objectMap["identityProfile"] = mcp.IdentityProfile + } + return json.Marshal(objectMap) +} + +// ManagedClusterPropertiesIdentityProfileValue ... 
+type ManagedClusterPropertiesIdentityProfileValue struct { + // ResourceID - The resource id of the user assigned identity. + ResourceID *string `json:"resourceId,omitempty"` + // ClientID - The client id of the user assigned identity. + ClientID *string `json:"clientId,omitempty"` + // ObjectID - The object id of the user assigned identity. + ObjectID *string `json:"objectId,omitempty"` +} + +// ManagedClustersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ManagedClustersCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ManagedClustersCreateOrUpdateFuture) Result(client ManagedClustersClient) (mc ManagedCluster, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent { + mc, err = client.CreateOrUpdateResponder(mc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersCreateOrUpdateFuture", "Result", mc.Response.Response, "Failure responding to request") + } + } + return +} + +// ManagedClustersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ManagedClustersDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ManagedClustersDeleteFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ManagedClusterServicePrincipalProfile information about a service principal identity for the cluster to +// use for manipulating Azure APIs. +type ManagedClusterServicePrincipalProfile struct { + // ClientID - The ID for the service principal. + ClientID *string `json:"clientId,omitempty"` + // Secret - The secret password associated with the service principal in plain text. + Secret *string `json:"secret,omitempty"` +} + +// ManagedClustersResetAADProfileFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ManagedClustersResetAADProfileFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
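For illustration, a minimal sketch of reading the identityProfile map defined above, written as if it lived alongside these types; the "kubeletidentity" key is a hypothetical placeholder and is not documented anywhere in this hunk.

```go
package containerservice

import "fmt"

// printIdentityProfileEntry is an illustrative sketch: it looks up one entry in the
// IdentityProfile map on ManagedClusterProperties. The "kubeletidentity" key is a
// hypothetical placeholder; only the map's value shape is defined by this model.
func printIdentityProfileEntry(props ManagedClusterProperties) {
	identity, ok := props.IdentityProfile["kubeletidentity"]
	if !ok || identity == nil {
		return
	}
	if identity.ClientID != nil {
		fmt.Println("client id:", *identity.ClientID)
	}
	if identity.ResourceID != nil {
		fmt.Println("resource id:", *identity.ResourceID)
	}
}
```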
+func (future *ManagedClustersResetAADProfileFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetAADProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetAADProfileFuture") + return + } + ar.Response = future.Response() + return +} + +// ManagedClustersResetServicePrincipalProfileFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type ManagedClustersResetServicePrincipalProfileFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ManagedClustersResetServicePrincipalProfileFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetServicePrincipalProfileFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetServicePrincipalProfileFuture") + return + } + ar.Response = future.Response() + return +} + +// ManagedClustersRotateClusterCertificatesFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type ManagedClustersRotateClusterCertificatesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ManagedClustersRotateClusterCertificatesFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersRotateClusterCertificatesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersRotateClusterCertificatesFuture") + return + } + ar.Response = future.Response() + return +} + +// ManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ManagedClustersUpdateTagsFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ManagedClustersUpdateTagsFuture) Result(client ManagedClustersClient) (mc ManagedCluster, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent { + mc, err = client.UpdateTagsResponder(mc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", mc.Response.Response, "Failure responding to request") + } + } + return +} + +// ManagedClusterUpgradeProfile the list of available upgrades for compute pools. +type ManagedClusterUpgradeProfile struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Id of upgrade profile. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Name of upgrade profile. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Type of upgrade profile. + Type *string `json:"type,omitempty"` + // ManagedClusterUpgradeProfileProperties - Properties of upgrade profile. + *ManagedClusterUpgradeProfileProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagedClusterUpgradeProfile. +func (mcup ManagedClusterUpgradeProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mcup.ManagedClusterUpgradeProfileProperties != nil { + objectMap["properties"] = mcup.ManagedClusterUpgradeProfileProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ManagedClusterUpgradeProfile struct. +func (mcup *ManagedClusterUpgradeProfile) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + mcup.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mcup.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + mcup.Type = &typeVar + } + case "properties": + if v != nil { + var managedClusterUpgradeProfileProperties ManagedClusterUpgradeProfileProperties + err = json.Unmarshal(*v, &managedClusterUpgradeProfileProperties) + if err != nil { + return err + } + mcup.ManagedClusterUpgradeProfileProperties = &managedClusterUpgradeProfileProperties + } + } + } + + return nil +} + +// ManagedClusterUpgradeProfileProperties control plane and agent pool upgrade profiles. +type ManagedClusterUpgradeProfileProperties struct { + // ControlPlaneProfile - The list of available upgrade versions for the control plane. + ControlPlaneProfile *ManagedClusterPoolUpgradeProfile `json:"controlPlaneProfile,omitempty"` + // AgentPoolProfiles - The list of available upgrade versions for agent pools. 
+ AgentPoolProfiles *[]ManagedClusterPoolUpgradeProfile `json:"agentPoolProfiles,omitempty"` +} + +// ManagedClusterWindowsProfile profile for Windows VMs in the container service cluster. +type ManagedClusterWindowsProfile struct { + // AdminUsername - The administrator username to use for Windows VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - The administrator password to use for Windows VMs. + AdminPassword *string `json:"adminPassword,omitempty"` +} + +// MasterProfile profile for the container service master. +type MasterProfile struct { + // Count - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1. + Count *int32 `json:"count,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for the master pool. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 
'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' + VMSize VMSizeTypes `json:"vmSize,omitempty"` + // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. + OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier. + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + // FirstConsecutiveStaticIP - FirstConsecutiveStaticIP used to specify the first static ip of masters. + FirstConsecutiveStaticIP *string `json:"firstConsecutiveStaticIP,omitempty"` + // StorageProfile - Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice. Possible values include: 'StorageAccount', 'ManagedDisks' + StorageProfile StorageProfileTypes `json:"storageProfile,omitempty"` + // Fqdn - READ-ONLY; FQDN for the master pool. + Fqdn *string `json:"fqdn,omitempty"` +} + +// NetworkProfile represents the OpenShift networking configuration +type NetworkProfile struct { + // VnetCidr - CIDR for the OpenShift Vnet. 
+ VnetCidr *string `json:"vnetCidr,omitempty"` + // ManagementSubnetCidr - CIDR of subnet used to create PLS needed for management of the cluster + ManagementSubnetCidr *string `json:"managementSubnetCidr,omitempty"` + // VnetID - ID of the Vnet created for OSA cluster. + VnetID *string `json:"vnetId,omitempty"` +} + +// NetworkProfileType profile of network configuration. +type NetworkProfileType struct { + // NetworkPlugin - Network plugin used for building Kubernetes network. Possible values include: 'Azure', 'Kubenet' + NetworkPlugin NetworkPlugin `json:"networkPlugin,omitempty"` + // NetworkPolicy - Network policy used for building Kubernetes network. Possible values include: 'NetworkPolicyCalico', 'NetworkPolicyAzure' + NetworkPolicy NetworkPolicy `json:"networkPolicy,omitempty"` + // PodCidr - A CIDR notation IP range from which to assign pod IPs when kubenet is used. + PodCidr *string `json:"podCidr,omitempty"` + // ServiceCidr - A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges. + ServiceCidr *string `json:"serviceCidr,omitempty"` + // DNSServiceIP - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr. + DNSServiceIP *string `json:"dnsServiceIP,omitempty"` + // DockerBridgeCidr - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range. + DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty"` + // OutboundType - The outbound (egress) routing method. Possible values include: 'LoadBalancer', 'UserDefinedRouting' + OutboundType OutboundType `json:"outboundType,omitempty"` + // LoadBalancerSku - The load balancer sku for the managed cluster. Possible values include: 'Standard', 'Basic' + LoadBalancerSku LoadBalancerSku `json:"loadBalancerSku,omitempty"` + // LoadBalancerProfile - Profile of the cluster load balancer. + LoadBalancerProfile *ManagedClusterLoadBalancerProfile `json:"loadBalancerProfile,omitempty"` +} + +// OpenShiftAPIProperties defines further properties on the API. +type OpenShiftAPIProperties struct { + // PrivateAPIServer - Specifies if API server is public or private. + PrivateAPIServer *bool `json:"privateApiServer,omitempty"` +} + +// OpenShiftManagedCluster openShift Managed cluster. +type OpenShiftManagedCluster struct { + autorest.Response `json:"-"` + // Plan - Define the resource plan as required by ARM for billing purposes + Plan *PurchasePlan `json:"plan,omitempty"` + // OpenShiftManagedClusterProperties - Properties of a OpenShift managed cluster. + *OpenShiftManagedClusterProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for OpenShiftManagedCluster. 
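To make the outbound-routing fields above concrete, here is a hedged sketch of a NetworkProfileType wired for the Standard load balancer, written as if it lived alongside these types; it assumes the enum constants carry the names listed in the field comments (Azure, Standard, LoadBalancer).

```go
package containerservice

import "github.com/Azure/go-autorest/autorest/to"

// exampleNetworkProfile is an illustrative sketch, not part of the generated API: it pairs the
// Standard load balancer SKU with a managed outbound IP count and an idle timeout, staying
// inside the ranges documented on the fields above.
func exampleNetworkProfile() *NetworkProfileType {
	return &NetworkProfileType{
		NetworkPlugin:   Azure,        // assumed constant name, per the field comment
		LoadBalancerSku: Standard,     // assumed constant name, per the field comment
		OutboundType:    LoadBalancer, // UserDefinedRouting instead requires pre-configured egress routes
		LoadBalancerProfile: &ManagedClusterLoadBalancerProfile{
			ManagedOutboundIPs: &ManagedClusterLoadBalancerProfileManagedOutboundIPs{
				Count: to.Int32Ptr(2), // allowed range 1 to 100
			},
			IdleTimeoutInMinutes: to.Int32Ptr(30), // allowed range 4 to 120
		},
	}
}
```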
+func (osmc OpenShiftManagedCluster) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if osmc.Plan != nil { + objectMap["plan"] = osmc.Plan + } + if osmc.OpenShiftManagedClusterProperties != nil { + objectMap["properties"] = osmc.OpenShiftManagedClusterProperties + } + if osmc.Location != nil { + objectMap["location"] = osmc.Location + } + if osmc.Tags != nil { + objectMap["tags"] = osmc.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OpenShiftManagedCluster struct. +func (osmc *OpenShiftManagedCluster) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "plan": + if v != nil { + var plan PurchasePlan + err = json.Unmarshal(*v, &plan) + if err != nil { + return err + } + osmc.Plan = &plan + } + case "properties": + if v != nil { + var openShiftManagedClusterProperties OpenShiftManagedClusterProperties + err = json.Unmarshal(*v, &openShiftManagedClusterProperties) + if err != nil { + return err + } + osmc.OpenShiftManagedClusterProperties = &openShiftManagedClusterProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + osmc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + osmc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + osmc.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + osmc.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + osmc.Tags = tags + } + } + } + + return nil +} + +// OpenShiftManagedClusterAADIdentityProvider defines the Identity provider for MS AAD. +type OpenShiftManagedClusterAADIdentityProvider struct { + // ClientID - The clientId password associated with the provider. + ClientID *string `json:"clientId,omitempty"` + // Secret - The secret password associated with the provider. + Secret *string `json:"secret,omitempty"` + // TenantID - The tenantId associated with the provider. + TenantID *string `json:"tenantId,omitempty"` + // CustomerAdminGroupID - The groupId to be granted cluster admin role. + CustomerAdminGroupID *string `json:"customerAdminGroupId,omitempty"` + // Kind - Possible values include: 'KindOpenShiftManagedClusterBaseIdentityProvider', 'KindAADIdentityProvider' + Kind Kind `json:"kind,omitempty"` +} + +// MarshalJSON is the custom marshaler for OpenShiftManagedClusterAADIdentityProvider. 
+func (osmcaip OpenShiftManagedClusterAADIdentityProvider) MarshalJSON() ([]byte, error) { + osmcaip.Kind = KindAADIdentityProvider + objectMap := make(map[string]interface{}) + if osmcaip.ClientID != nil { + objectMap["clientId"] = osmcaip.ClientID + } + if osmcaip.Secret != nil { + objectMap["secret"] = osmcaip.Secret + } + if osmcaip.TenantID != nil { + objectMap["tenantId"] = osmcaip.TenantID + } + if osmcaip.CustomerAdminGroupID != nil { + objectMap["customerAdminGroupId"] = osmcaip.CustomerAdminGroupID + } + if osmcaip.Kind != "" { + objectMap["kind"] = osmcaip.Kind + } + return json.Marshal(objectMap) +} + +// AsOpenShiftManagedClusterAADIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterAADIdentityProvider. +func (osmcaip OpenShiftManagedClusterAADIdentityProvider) AsOpenShiftManagedClusterAADIdentityProvider() (*OpenShiftManagedClusterAADIdentityProvider, bool) { + return &osmcaip, true +} + +// AsOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterAADIdentityProvider. +func (osmcaip OpenShiftManagedClusterAADIdentityProvider) AsOpenShiftManagedClusterBaseIdentityProvider() (*OpenShiftManagedClusterBaseIdentityProvider, bool) { + return nil, false +} + +// AsBasicOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterAADIdentityProvider. +func (osmcaip OpenShiftManagedClusterAADIdentityProvider) AsBasicOpenShiftManagedClusterBaseIdentityProvider() (BasicOpenShiftManagedClusterBaseIdentityProvider, bool) { + return &osmcaip, true +} + +// OpenShiftManagedClusterAgentPoolProfile defines the configuration of the OpenShift cluster VMs. +type OpenShiftManagedClusterAgentPoolProfile struct { + // Name - Unique name of the pool profile in the context of the subscription and resource group. + Name *string `json:"name,omitempty"` + // Count - Number of agents (VMs) to host docker containers. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. Possible values include: 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardDS4V2', 'StandardDS5V2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardF8s', 'StandardF16s', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE20sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s' + VMSize OpenShiftContainerServiceVMSize `json:"vmSize,omitempty"` + // SubnetCidr - Subnet CIDR for the peering. + SubnetCidr *string `json:"subnetCidr,omitempty"` + // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows' + OsType OSType `json:"osType,omitempty"` + // Role - Define the role of the AgentPoolProfile. Possible values include: 'Compute', 'Infra' + Role OpenShiftAgentPoolProfileRole `json:"role,omitempty"` +} + +// OpenShiftManagedClusterAuthProfile defines all possible authentication profiles for the OpenShift +// cluster. +type OpenShiftManagedClusterAuthProfile struct { + // IdentityProviders - Type of authentication profile to use. 
+ IdentityProviders *[]OpenShiftManagedClusterIdentityProvider `json:"identityProviders,omitempty"` +} + +// BasicOpenShiftManagedClusterBaseIdentityProvider structure for any Identity provider. +type BasicOpenShiftManagedClusterBaseIdentityProvider interface { + AsOpenShiftManagedClusterAADIdentityProvider() (*OpenShiftManagedClusterAADIdentityProvider, bool) + AsOpenShiftManagedClusterBaseIdentityProvider() (*OpenShiftManagedClusterBaseIdentityProvider, bool) +} + +// OpenShiftManagedClusterBaseIdentityProvider structure for any Identity provider. +type OpenShiftManagedClusterBaseIdentityProvider struct { + // Kind - Possible values include: 'KindOpenShiftManagedClusterBaseIdentityProvider', 'KindAADIdentityProvider' + Kind Kind `json:"kind,omitempty"` +} + +func unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider(body []byte) (BasicOpenShiftManagedClusterBaseIdentityProvider, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["kind"] { + case string(KindAADIdentityProvider): + var osmcaip OpenShiftManagedClusterAADIdentityProvider + err := json.Unmarshal(body, &osmcaip) + return osmcaip, err + default: + var osmcbip OpenShiftManagedClusterBaseIdentityProvider + err := json.Unmarshal(body, &osmcbip) + return osmcbip, err + } +} +func unmarshalBasicOpenShiftManagedClusterBaseIdentityProviderArray(body []byte) ([]BasicOpenShiftManagedClusterBaseIdentityProvider, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + osmcbipArray := make([]BasicOpenShiftManagedClusterBaseIdentityProvider, len(rawMessages)) + + for index, rawMessage := range rawMessages { + osmcbip, err := unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider(*rawMessage) + if err != nil { + return nil, err + } + osmcbipArray[index] = osmcbip + } + return osmcbipArray, nil +} + +// MarshalJSON is the custom marshaler for OpenShiftManagedClusterBaseIdentityProvider. +func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) MarshalJSON() ([]byte, error) { + osmcbip.Kind = KindOpenShiftManagedClusterBaseIdentityProvider + objectMap := make(map[string]interface{}) + if osmcbip.Kind != "" { + objectMap["kind"] = osmcbip.Kind + } + return json.Marshal(objectMap) +} + +// AsOpenShiftManagedClusterAADIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterBaseIdentityProvider. +func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) AsOpenShiftManagedClusterAADIdentityProvider() (*OpenShiftManagedClusterAADIdentityProvider, bool) { + return nil, false +} + +// AsOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterBaseIdentityProvider. +func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) AsOpenShiftManagedClusterBaseIdentityProvider() (*OpenShiftManagedClusterBaseIdentityProvider, bool) { + return &osmcbip, true +} + +// AsBasicOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterBaseIdentityProvider. 
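The kind-discriminated provider types above are used in both directions; a hedged sketch follows, written as if it lived alongside these types: build the concrete AAD provider when sending, and use the As* helpers after the custom unmarshaler has picked the concrete type.

```go
package containerservice

import "github.com/Azure/go-autorest/autorest/to"

// exampleAuthProfile is an illustrative sketch; aadAppID, aadSecret and tenantID are placeholders.
// Assigning the concrete OpenShiftManagedClusterAADIdentityProvider value satisfies the
// BasicOpenShiftManagedClusterBaseIdentityProvider interface, and its MarshalJSON stamps the kind.
func exampleAuthProfile(aadAppID, aadSecret, tenantID string) *OpenShiftManagedClusterAuthProfile {
	return &OpenShiftManagedClusterAuthProfile{
		IdentityProviders: &[]OpenShiftManagedClusterIdentityProvider{{
			Name: to.StringPtr("Azure AD"), // placeholder provider name
			Provider: OpenShiftManagedClusterAADIdentityProvider{
				ClientID: to.StringPtr(aadAppID),
				Secret:   to.StringPtr(aadSecret),
				TenantID: to.StringPtr(tenantID),
			},
		}},
	}
}

// aadClientID shows the reverse direction: after unmarshaling, the As* helper recovers the concrete type.
func aadClientID(idp OpenShiftManagedClusterIdentityProvider) (string, bool) {
	if idp.Provider == nil {
		return "", false
	}
	if aad, ok := idp.Provider.AsOpenShiftManagedClusterAADIdentityProvider(); ok && aad.ClientID != nil {
		return *aad.ClientID, true
	}
	return "", false
}
```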
+func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) AsBasicOpenShiftManagedClusterBaseIdentityProvider() (BasicOpenShiftManagedClusterBaseIdentityProvider, bool) { + return &osmcbip, true +} + +// OpenShiftManagedClusterIdentityProvider defines the configuration of the identity providers to be used +// in the OpenShift cluster. +type OpenShiftManagedClusterIdentityProvider struct { + // Name - Name of the provider. + Name *string `json:"name,omitempty"` + // Provider - Configuration of the provider. + Provider BasicOpenShiftManagedClusterBaseIdentityProvider `json:"provider,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for OpenShiftManagedClusterIdentityProvider struct. +func (osmcip *OpenShiftManagedClusterIdentityProvider) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + osmcip.Name = &name + } + case "provider": + if v != nil { + provider, err := unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider(*v) + if err != nil { + return err + } + osmcip.Provider = provider + } + } + } + + return nil +} + +// OpenShiftManagedClusterListResult the response from the List OpenShift Managed Clusters operation. +type OpenShiftManagedClusterListResult struct { + autorest.Response `json:"-"` + // Value - The list of OpenShift managed clusters. + Value *[]OpenShiftManagedCluster `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of OpenShift managed cluster results. + NextLink *string `json:"nextLink,omitempty"` +} + +// OpenShiftManagedClusterListResultIterator provides access to a complete listing of +// OpenShiftManagedCluster values. +type OpenShiftManagedClusterListResultIterator struct { + i int + page OpenShiftManagedClusterListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *OpenShiftManagedClusterListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClusterListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *OpenShiftManagedClusterListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter OpenShiftManagedClusterListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter OpenShiftManagedClusterListResultIterator) Response() OpenShiftManagedClusterListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter OpenShiftManagedClusterListResultIterator) Value() OpenShiftManagedCluster { + if !iter.page.NotDone() { + return OpenShiftManagedCluster{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the OpenShiftManagedClusterListResultIterator type. +func NewOpenShiftManagedClusterListResultIterator(page OpenShiftManagedClusterListResultPage) OpenShiftManagedClusterListResultIterator { + return OpenShiftManagedClusterListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (osmclr OpenShiftManagedClusterListResult) IsEmpty() bool { + return osmclr.Value == nil || len(*osmclr.Value) == 0 +} + +// openShiftManagedClusterListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (osmclr OpenShiftManagedClusterListResult) openShiftManagedClusterListResultPreparer(ctx context.Context) (*http.Request, error) { + if osmclr.NextLink == nil || len(to.String(osmclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(osmclr.NextLink))) +} + +// OpenShiftManagedClusterListResultPage contains a page of OpenShiftManagedCluster values. +type OpenShiftManagedClusterListResultPage struct { + fn func(context.Context, OpenShiftManagedClusterListResult) (OpenShiftManagedClusterListResult, error) + osmclr OpenShiftManagedClusterListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *OpenShiftManagedClusterListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClusterListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.osmclr) + if err != nil { + return err + } + page.osmclr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *OpenShiftManagedClusterListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page OpenShiftManagedClusterListResultPage) NotDone() bool { + return !page.osmclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page OpenShiftManagedClusterListResultPage) Response() OpenShiftManagedClusterListResult { + return page.osmclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page OpenShiftManagedClusterListResultPage) Values() []OpenShiftManagedCluster { + if page.osmclr.IsEmpty() { + return nil + } + return *page.osmclr.Value +} + +// Creates a new instance of the OpenShiftManagedClusterListResultPage type. 
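The page type above carries the whole iteration contract (NotDone, Values, NextWithContext); a hedged usage sketch follows, written as if it lived alongside these types, and assuming the client's List method returns an OpenShiftManagedClusterListResultPage in the usual generated shape (the method itself is not part of this hunk).

```go
package containerservice

import (
	"context"
	"fmt"
)

// listClusterNames is an illustrative sketch of the paging contract defined above. It assumes
// OpenShiftManagedClustersClient.List(ctx) exists and returns an OpenShiftManagedClusterListResultPage.
func listClusterNames(ctx context.Context, client OpenShiftManagedClustersClient) error {
	page, err := client.List(ctx)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, osmc := range page.Values() {
			if osmc.Name != nil {
				fmt.Println(*osmc.Name)
			}
		}
		// On failure the page does not advance and the error is returned, so we can stop here.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```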
+func NewOpenShiftManagedClusterListResultPage(getNextPage func(context.Context, OpenShiftManagedClusterListResult) (OpenShiftManagedClusterListResult, error)) OpenShiftManagedClusterListResultPage {
+ return OpenShiftManagedClusterListResultPage{fn: getNextPage}
+}
+
+// OpenShiftManagedClusterMasterPoolProfile contains configuration for OpenShift master VMs.
+type OpenShiftManagedClusterMasterPoolProfile struct {
+ // Count - Number of masters (VMs) to host docker containers. The default value is 3.
+ Count *int32 `json:"count,omitempty"`
+ // VMSize - Size of agent VMs. Possible values include: 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardDS4V2', 'StandardDS5V2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardF8s', 'StandardF16s', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE20sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s'
+ VMSize OpenShiftContainerServiceVMSize `json:"vmSize,omitempty"`
+ // SubnetCidr - Subnet CIDR for the peering.
+ SubnetCidr *string `json:"subnetCidr,omitempty"`
+ // APIProperties - Defines further properties on the API.
+ APIProperties *OpenShiftAPIProperties `json:"apiProperties,omitempty"`
+}
+
+// OpenShiftManagedClusterMonitorProfile defines the configuration for Log Analytics integration.
+type OpenShiftManagedClusterMonitorProfile struct {
+ // WorkspaceResourceID - Azure Resource Manager Resource ID for the Log Analytics workspace to integrate with.
+ WorkspaceResourceID *string `json:"workspaceResourceID,omitempty"`
+ // Enabled - Whether Log Analytics integration should be turned on or off.
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// OpenShiftManagedClusterProperties properties of the OpenShift managed cluster.
+type OpenShiftManagedClusterProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // OpenShiftVersion - Version of OpenShift specified when creating the cluster.
+ OpenShiftVersion *string `json:"openShiftVersion,omitempty"`
+ // ClusterVersion - READ-ONLY; Version of OpenShift specified when creating the cluster.
+ ClusterVersion *string `json:"clusterVersion,omitempty"`
+ // PublicHostname - READ-ONLY; Service generated FQDN or private IP for OpenShift API server.
+ PublicHostname *string `json:"publicHostname,omitempty"`
+ // Fqdn - READ-ONLY; Service generated FQDN for OpenShift API server loadbalancer internal hostname.
+ Fqdn *string `json:"fqdn,omitempty"`
+ // NetworkProfile - Configuration for OpenShift networking.
+ NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
+ // RouterProfiles - Configuration for OpenShift router(s).
+ RouterProfiles *[]OpenShiftRouterProfile `json:"routerProfiles,omitempty"`
+ // MasterPoolProfile - Configuration for OpenShift master VMs.
+ MasterPoolProfile *OpenShiftManagedClusterMasterPoolProfile `json:"masterPoolProfile,omitempty"`
+ // AgentPoolProfiles - Configuration of OpenShift cluster VMs.
+ AgentPoolProfiles *[]OpenShiftManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"`
+ // AuthProfile - Configures OpenShift authentication.
+ AuthProfile *OpenShiftManagedClusterAuthProfile `json:"authProfile,omitempty"` + // MonitorProfile - Configures Log Analytics integration. + MonitorProfile *OpenShiftManagedClusterMonitorProfile `json:"monitorProfile,omitempty"` + // RefreshCluster - Allows node rotation + RefreshCluster *bool `json:"refreshCluster,omitempty"` +} + +// OpenShiftManagedClustersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of +// a long-running operation. +type OpenShiftManagedClustersCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *OpenShiftManagedClustersCreateOrUpdateFuture) Result(client OpenShiftManagedClustersClient) (osmc OpenShiftManagedCluster, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.OpenShiftManagedClustersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if osmc.Response.Response, err = future.GetResult(sender); err == nil && osmc.Response.Response.StatusCode != http.StatusNoContent { + osmc, err = client.CreateOrUpdateResponder(osmc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersCreateOrUpdateFuture", "Result", osmc.Response.Response, "Failure responding to request") + } + } + return +} + +// OpenShiftManagedClustersDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type OpenShiftManagedClustersDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *OpenShiftManagedClustersDeleteFuture) Result(client OpenShiftManagedClustersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.OpenShiftManagedClustersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// OpenShiftManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type OpenShiftManagedClustersUpdateTagsFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *OpenShiftManagedClustersUpdateTagsFuture) Result(client OpenShiftManagedClustersClient) (osmc OpenShiftManagedCluster, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.OpenShiftManagedClustersUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if osmc.Response.Response, err = future.GetResult(sender); err == nil && osmc.Response.Response.StatusCode != http.StatusNoContent { + osmc, err = client.UpdateTagsResponder(osmc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersUpdateTagsFuture", "Result", osmc.Response.Response, "Failure responding to request") + } + } + return +} + +// OpenShiftRouterProfile represents an OpenShift router +type OpenShiftRouterProfile struct { + // Name - Name of the router profile. + Name *string `json:"name,omitempty"` + // PublicSubdomain - READ-ONLY; DNS subdomain for OpenShift router. + PublicSubdomain *string `json:"publicSubdomain,omitempty"` + // Fqdn - READ-ONLY; Auto-allocated FQDN for the OpenShift router. + Fqdn *string `json:"fqdn,omitempty"` +} + +// OperationListResult the List Compute Operation operation response. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The list of compute operations + Value *[]OperationValue `json:"value,omitempty"` +} + +// OperationValue describes the properties of a Compute Operation value. +type OperationValue struct { + // Origin - READ-ONLY; The origin of the compute operation. + Origin *string `json:"origin,omitempty"` + // Name - READ-ONLY; The name of the compute operation. + Name *string `json:"name,omitempty"` + // OperationValueDisplay - Describes the properties of a Compute Operation Value Display. + *OperationValueDisplay `json:"display,omitempty"` +} + +// MarshalJSON is the custom marshaler for OperationValue. +func (ov OperationValue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ov.OperationValueDisplay != nil { + objectMap["display"] = ov.OperationValueDisplay + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OperationValue struct. +func (ov *OperationValue) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "origin": + if v != nil { + var origin string + err = json.Unmarshal(*v, &origin) + if err != nil { + return err + } + ov.Origin = &origin + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ov.Name = &name + } + case "display": + if v != nil { + var operationValueDisplay OperationValueDisplay + err = json.Unmarshal(*v, &operationValueDisplay) + if err != nil { + return err + } + ov.OperationValueDisplay = &operationValueDisplay + } + } + } + + return nil +} + +// OperationValueDisplay describes the properties of a Compute Operation Value Display. +type OperationValueDisplay struct { + // Operation - READ-ONLY; The display name of the compute operation. 
+ Operation *string `json:"operation,omitempty"` + // Resource - READ-ONLY; The display name of the resource the operation applies to. + Resource *string `json:"resource,omitempty"` + // Description - READ-ONLY; The description of the operation. + Description *string `json:"description,omitempty"` + // Provider - READ-ONLY; The resource provider for the operation. + Provider *string `json:"provider,omitempty"` +} + +// OrchestratorProfile contains information about orchestrator. +type OrchestratorProfile struct { + // OrchestratorType - Orchestrator type. + OrchestratorType *string `json:"orchestratorType,omitempty"` + // OrchestratorVersion - Orchestrator version (major, minor, patch). + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + // IsPreview - Whether Kubernetes version is currently in preview. + IsPreview *bool `json:"isPreview,omitempty"` +} + +// OrchestratorProfileType profile for the container service orchestrator. +type OrchestratorProfileType struct { + // OrchestratorType - The orchestrator to use to manage container service cluster resources. Valid values are Kubernetes, Swarm, DCOS, DockerCE and Custom. Possible values include: 'Kubernetes', 'Swarm', 'DCOS', 'DockerCE', 'Custom' + OrchestratorType OrchestratorTypes `json:"orchestratorType,omitempty"` + // OrchestratorVersion - The version of the orchestrator to use. You can specify the major.minor.patch part of the actual version.For example, you can specify version as "1.6.11". + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` +} + +// OrchestratorVersionProfile the profile of an orchestrator and its available versions. +type OrchestratorVersionProfile struct { + // OrchestratorType - Orchestrator type. + OrchestratorType *string `json:"orchestratorType,omitempty"` + // OrchestratorVersion - Orchestrator version (major, minor, patch). + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + // Default - Installed by default if version is not specified. + Default *bool `json:"default,omitempty"` + // IsPreview - Whether Kubernetes version is currently in preview. + IsPreview *bool `json:"isPreview,omitempty"` + // Upgrades - The list of available upgrade versions. + Upgrades *[]OrchestratorProfile `json:"upgrades,omitempty"` +} + +// OrchestratorVersionProfileListResult the list of versions for supported orchestrators. +type OrchestratorVersionProfileListResult struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Id of the orchestrator version profile list result. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Name of the orchestrator version profile list result. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Type of the orchestrator version profile list result. + Type *string `json:"type,omitempty"` + // OrchestratorVersionProfileProperties - The properties of an orchestrator version profile. + *OrchestratorVersionProfileProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for OrchestratorVersionProfileListResult. +func (ovplr OrchestratorVersionProfileListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ovplr.OrchestratorVersionProfileProperties != nil { + objectMap["properties"] = ovplr.OrchestratorVersionProfileProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OrchestratorVersionProfileListResult struct. 
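Since the version-profile models above are mostly read, a short sketch of scanning them may help; it is written as if it lived alongside these types, and a result of this shape would typically come from a list-orchestrators call that is not part of this hunk.

```go
package containerservice

// defaultKubernetesVersion is an illustrative sketch: it walks an OrchestratorVersionProfileListResult
// and returns the Kubernetes version flagged as the default, if any.
func defaultKubernetesVersion(result OrchestratorVersionProfileListResult) (string, bool) {
	if result.OrchestratorVersionProfileProperties == nil || result.Orchestrators == nil {
		return "", false
	}
	for _, o := range *result.Orchestrators {
		if o.OrchestratorType != nil && *o.OrchestratorType == "Kubernetes" &&
			o.Default != nil && *o.Default && o.OrchestratorVersion != nil {
			return *o.OrchestratorVersion, true
		}
	}
	return "", false
}
```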
+func (ovplr *OrchestratorVersionProfileListResult) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ovplr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ovplr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ovplr.Type = &typeVar
+ }
+ case "properties":
+ if v != nil {
+ var orchestratorVersionProfileProperties OrchestratorVersionProfileProperties
+ err = json.Unmarshal(*v, &orchestratorVersionProfileProperties)
+ if err != nil {
+ return err
+ }
+ ovplr.OrchestratorVersionProfileProperties = &orchestratorVersionProfileProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// OrchestratorVersionProfileProperties the properties of an orchestrator version profile.
+type OrchestratorVersionProfileProperties struct {
+ // Orchestrators - List of orchestrator version profiles.
+ Orchestrators *[]OrchestratorVersionProfile `json:"orchestrators,omitempty"`
+}
+
+// Properties properties of the container service.
+type Properties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // OrchestratorProfile - Profile for the container service orchestrator.
+ OrchestratorProfile *OrchestratorProfileType `json:"orchestratorProfile,omitempty"`
+ // CustomProfile - Properties to configure a custom container service cluster.
+ CustomProfile *CustomProfile `json:"customProfile,omitempty"`
+ // ServicePrincipalProfile - Information about a service principal identity for the cluster to use for manipulating Azure APIs. Exactly one of secret or keyVaultSecretRef needs to be specified.
+ ServicePrincipalProfile *ServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"`
+ // MasterProfile - Profile for the container service master.
+ MasterProfile *MasterProfile `json:"masterProfile,omitempty"`
+ // AgentPoolProfiles - Properties of the agent pool.
+ AgentPoolProfiles *[]AgentPoolProfile `json:"agentPoolProfiles,omitempty"`
+ // WindowsProfile - Profile for Windows VMs in the container service cluster.
+ WindowsProfile *WindowsProfile `json:"windowsProfile,omitempty"`
+ // LinuxProfile - Profile for Linux VMs in the container service cluster.
+ LinuxProfile *LinuxProfile `json:"linuxProfile,omitempty"`
+ // DiagnosticsProfile - Profile for diagnostics in the container service cluster.
+ DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
+}
+
+// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
+type PurchasePlan struct {
+ // Name - The plan ID.
+ Name *string `json:"name,omitempty"`
+ // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
+ Product *string `json:"product,omitempty"`
+ // PromotionCode - The promotion code.
+ PromotionCode *string `json:"promotionCode,omitempty"`
+ // Publisher - The publisher ID.
+ Publisher *string `json:"publisher,omitempty"`
+}
+
+// Resource the Resource model definition.
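A hedged sketch of the service-principal rule noted on Properties above (supply the secret and leave keyVaultSecretRef unset, or vice versa), written as if it lived alongside these types; the constant Kubernetes is assumed to match the documented orchestrator value, and the version string is just the example quoted in the field comment.

```go
package containerservice

import "github.com/Azure/go-autorest/autorest/to"

// exampleProperties is an illustrative sketch: a Properties value for a Kubernetes cluster that
// satisfies "exactly one of secret or keyVaultSecretRef" by setting only Secret.
// spClientID and spSecret are placeholders supplied by the caller.
func exampleProperties(spClientID, spSecret string) *Properties {
	return &Properties{
		OrchestratorProfile: &OrchestratorProfileType{
			OrchestratorType:    Kubernetes,             // assumed constant name, per the field comment
			OrchestratorVersion: to.StringPtr("1.6.11"), // example version from the field comment
		},
		ServicePrincipalProfile: &ServicePrincipalProfile{
			ClientID: to.StringPtr(spClientID),
			Secret:   to.StringPtr(spSecret), // KeyVaultSecretRef is deliberately left nil
		},
	}
}
```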
+type Resource struct { + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// ResourceReference a reference to an Azure resource. +type ResourceReference struct { + // ID - The fully qualified Azure resource id. + ID *string `json:"id,omitempty"` +} + +// ServicePrincipalProfile information about a service principal identity for the cluster to use for +// manipulating Azure APIs. Either secret or keyVaultSecretRef must be specified. +type ServicePrincipalProfile struct { + // ClientID - The ID for the service principal. + ClientID *string `json:"clientId,omitempty"` + // Secret - The secret password associated with the service principal in plain text. + Secret *string `json:"secret,omitempty"` + // KeyVaultSecretRef - Reference to a secret stored in Azure Key Vault. + KeyVaultSecretRef *KeyVaultSecretRef `json:"keyVaultSecretRef,omitempty"` +} + +// SSHConfiguration SSH configuration for Linux-based VMs running on Azure. +type SSHConfiguration struct { + // PublicKeys - The list of SSH public keys used to authenticate with Linux-based VMs. Only expect one key specified. + PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` +} + +// SSHPublicKey contains information about SSH certificate public key data. +type SSHPublicKey struct { + // KeyData - Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers. + KeyData *string `json:"keyData,omitempty"` +} + +// SubResource reference to another subresource. +type SubResource struct { + // ID - READ-ONLY; Resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` +} + +// TagsObject tags object for patch operations. +type TagsObject struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for TagsObject. +func (toVar TagsObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if toVar.Tags != nil { + objectMap["tags"] = toVar.Tags + } + return json.Marshal(objectMap) +} + +// UserAssignedIdentity ... +type UserAssignedIdentity struct { + // ResourceID - The resource id of the user assigned identity. + ResourceID *string `json:"resourceId,omitempty"` + // ClientID - The client id of the user assigned identity. + ClientID *string `json:"clientId,omitempty"` + // ObjectID - The object id of the user assigned identity. + ObjectID *string `json:"objectId,omitempty"` +} + +// VMDiagnostics profile for diagnostics on the container service VMs. +type VMDiagnostics struct { + // Enabled - Whether the VM diagnostic agent is provisioned on the VM. 
+	Enabled *bool `json:"enabled,omitempty"`
+	// StorageURI - READ-ONLY; The URI of the storage account where diagnostics are stored.
+	StorageURI *string `json:"storageUri,omitempty"`
+}
+
+// WindowsProfile profile for Windows VMs in the container service cluster.
+type WindowsProfile struct {
+	// AdminUsername - The administrator username to use for Windows VMs.
+	AdminUsername *string `json:"adminUsername,omitempty"`
+	// AdminPassword - The administrator password to use for Windows VMs.
+	AdminPassword *string `json:"adminPassword,omitempty"`
+}
diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/openshiftmanagedclusters.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/openshiftmanagedclusters.go
new file mode 100644
index 000000000000..ec88a3148fb5
--- /dev/null
+++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/openshiftmanagedclusters.go
@@ -0,0 +1,613 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// OpenShiftManagedClustersClient is the Container Service Client.
+type OpenShiftManagedClustersClient struct {
+	BaseClient
+}
+
+// NewOpenShiftManagedClustersClient creates an instance of the OpenShiftManagedClustersClient client.
+func NewOpenShiftManagedClustersClient(subscriptionID string) OpenShiftManagedClustersClient {
+	return NewOpenShiftManagedClustersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOpenShiftManagedClustersClientWithBaseURI creates an instance of the OpenShiftManagedClustersClient client using
+// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
+// clouds, Azure stack).
+func NewOpenShiftManagedClustersClientWithBaseURI(baseURI string, subscriptionID string) OpenShiftManagedClustersClient {
+	return OpenShiftManagedClustersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates an OpenShift managed cluster with the specified configuration for agents and
+// OpenShift version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the OpenShift managed cluster resource.
+// parameters - parameters supplied to the Create or Update an OpenShift Managed Cluster operation.
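+//
+// Illustrative usage sketch (not generated code; the authorizer, context and
+// argument values below are placeholders supplied by the caller):
+//
+//   client := containerservice.NewOpenShiftManagedClustersClient("<subscription id>")
+//   client.Authorizer = authorizer
+//   future, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myCluster", parameters)
+//   if err == nil {
+//       err = future.WaitForCompletionRef(ctx, client.Client)
+//   }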
+func (client OpenShiftManagedClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters OpenShiftManagedCluster) (result OpenShiftManagedClustersCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.OpenShiftManagedClusterProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.OpenShiftManagedClusterProperties.OpenShiftVersion", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.OpenShiftManagedClusterProperties.MasterPoolProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.OpenShiftManagedClusterProperties.MasterPoolProfile.Count", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client OpenShiftManagedClustersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters OpenShiftManagedCluster) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-10-27-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client OpenShiftManagedClustersClient) CreateOrUpdateSender(req *http.Request) (future OpenShiftManagedClustersCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client OpenShiftManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result OpenShiftManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the OpenShift managed cluster with a specified resource group and name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the OpenShift managed cluster resource. +func (client OpenShiftManagedClustersClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result OpenShiftManagedClustersDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client OpenShiftManagedClustersClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-10-27-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client OpenShiftManagedClustersClient) DeleteSender(req *http.Request) (future OpenShiftManagedClustersDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client OpenShiftManagedClustersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the details of the managed OpenShift cluster with a specified resource group and name. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the OpenShift managed cluster resource. +func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result OpenShiftManagedCluster, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client OpenShiftManagedClustersClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-10-27-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client OpenShiftManagedClustersClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client OpenShiftManagedClustersClient) GetResponder(resp *http.Response) (result OpenShiftManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of OpenShift managed clusters in the specified subscription. The operation returns properties of +// each OpenShift managed cluster. +func (client OpenShiftManagedClustersClient) List(ctx context.Context) (result OpenShiftManagedClusterListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.List") + defer func() { + sc := -1 + if result.osmclr.Response.Response != nil { + sc = result.osmclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.osmclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "List", resp, "Failure sending request") + return + } + + result.osmclr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OpenShiftManagedClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-10-27-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/openShiftManagedClusters", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OpenShiftManagedClustersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client OpenShiftManagedClustersClient) ListResponder(resp *http.Response) (result OpenShiftManagedClusterListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client OpenShiftManagedClustersClient) listNextResults(ctx context.Context, lastResults OpenShiftManagedClusterListResult) (result OpenShiftManagedClusterListResult, err error) { + req, err := lastResults.openShiftManagedClusterListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client OpenShiftManagedClustersClient) ListComplete(ctx context.Context) (result OpenShiftManagedClusterListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists OpenShift managed clusters in the specified subscription and resource group. The operation +// returns properties of each OpenShift managed cluster. +// Parameters: +// resourceGroupName - the name of the resource group. 
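+//
+// Results are returned one page at a time; ListByResourceGroupComplete can be
+// used instead to enumerate every cluster across all pages.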
+func (client OpenShiftManagedClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result OpenShiftManagedClusterListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.osmclr.Response.Response != nil { + sc = result.osmclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.osmclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.osmclr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client OpenShiftManagedClustersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-10-27-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client OpenShiftManagedClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client OpenShiftManagedClustersClient) ListByResourceGroupResponder(resp *http.Response) (result OpenShiftManagedClusterListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client OpenShiftManagedClustersClient) listByResourceGroupNextResults(ctx context.Context, lastResults OpenShiftManagedClusterListResult) (result OpenShiftManagedClusterListResult, err error) { + req, err := lastResults.openShiftManagedClusterListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client OpenShiftManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result OpenShiftManagedClusterListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// UpdateTags updates an OpenShift managed cluster with the specified tags. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the OpenShift managed cluster resource. +// parameters - parameters supplied to the Update OpenShift Managed Cluster Tags operation. +func (client OpenShiftManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result OpenShiftManagedClustersUpdateTagsFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.UpdateTags") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "UpdateTags", err.Error()) + } + + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "UpdateTags", nil, "Failure preparing request") + return + } + + result, err = client.UpdateTagsSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. 
+func (client OpenShiftManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"resourceName":      autorest.Encode("path", resourceName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2019-10-27-preview"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateTagsSender sends the UpdateTags request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) UpdateTagsSender(req *http.Request) (future OpenShiftManagedClustersUpdateTagsFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	future.Future, err = azure.NewFutureFromResponse(resp)
+	return
+}
+
+// UpdateTagsResponder handles the response to the UpdateTags request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result OpenShiftManagedCluster, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/operations.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/operations.go
new file mode 100644
index 000000000000..0d643c2c5adb
--- /dev/null
+++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/operations.go
@@ -0,0 +1,109 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// OperationsClient is the Container Service Client.
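+//
+// Illustrative usage sketch (placeholder values; the authorizer and context are
+// assumed to be supplied by the caller):
+//
+//   opsClient := containerservice.NewOperationsClient("<subscription id>")
+//   opsClient.Authorizer = authorizer
+//   operations, err := opsClient.List(ctx)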
+type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of compute operations. +func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2020-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.ContainerService/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/version.go b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/version.go new file mode 100644 index 000000000000..16acfc41c807 --- /dev/null +++ b/services/preview/containerservice/mgmt/2019-10-27-preview/containerservice/version.go @@ -0,0 +1,30 @@ +package containerservice + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " containerservice/2019-10-27-preview" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/bigdatapools.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/bigdatapools.go new file mode 100644 index 000000000000..158b918a49c4 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/bigdatapools.go @@ -0,0 +1,538 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// BigDataPoolsClient is the azure Synapse Analytics Management Client +type BigDataPoolsClient struct { + BaseClient +} + +// NewBigDataPoolsClient creates an instance of the BigDataPoolsClient client. +func NewBigDataPoolsClient(subscriptionID string) BigDataPoolsClient { + return NewBigDataPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBigDataPoolsClientWithBaseURI creates an instance of the BigDataPoolsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewBigDataPoolsClientWithBaseURI(baseURI string, subscriptionID string) BigDataPoolsClient { + return BigDataPoolsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create a new Big Data pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// bigDataPoolName - big Data pool name +// bigDataPoolInfo - the Big Data pool to create. 
+// force - whether to stop any running jobs in the Big Data pool +func (client BigDataPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string, bigDataPoolInfo BigDataPoolResourceInfo, force *bool) (result BigDataPoolsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: bigDataPoolInfo, + Constraints: []validation.Constraint{{Target: "bigDataPoolInfo.BigDataPoolResourceProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "bigDataPoolInfo.BigDataPoolResourceProperties.NodeCount", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "bigDataPoolInfo.BigDataPoolResourceProperties.NodeCount", Name: validation.InclusiveMaximum, Rule: int64(200), Chain: nil}, + {Target: "bigDataPoolInfo.BigDataPoolResourceProperties.NodeCount", Name: validation.InclusiveMinimum, Rule: int64(3), Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("synapse.BigDataPoolsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, bigDataPoolName, bigDataPoolInfo, force) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
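+// When force is not supplied (nil), the force query parameter defaults to false.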
+func (client BigDataPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string, bigDataPoolInfo BigDataPoolResourceInfo, force *bool) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "bigDataPoolName": autorest.Encode("path", bigDataPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if force != nil { + queryParameters["force"] = autorest.Encode("query", *force) + } else { + queryParameters["force"] = autorest.Encode("query", false) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}", pathParameters), + autorest.WithJSON(bigDataPoolInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client BigDataPoolsClient) CreateOrUpdateSender(req *http.Request) (future BigDataPoolsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client BigDataPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result BigDataPoolResourceInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a Big Data pool from the workspace. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// bigDataPoolName - big Data pool name +func (client BigDataPoolsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string) (result BigDataPoolsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.BigDataPoolsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, bigDataPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client BigDataPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "bigDataPoolName": autorest.Encode("path", bigDataPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client BigDataPoolsClient) DeleteSender(req *http.Request) (future BigDataPoolsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client BigDataPoolsClient) DeleteResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a Big Data pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// bigDataPoolName - big Data pool name +func (client BigDataPoolsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string) (result BigDataPoolResourceInfo, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.BigDataPoolsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, bigDataPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client BigDataPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "bigDataPoolName": autorest.Encode("path", bigDataPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client BigDataPoolsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client BigDataPoolsClient) GetResponder(resp *http.Response) (result BigDataPoolResourceInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByWorkspace list Big Data pools in a workspace. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +func (client BigDataPoolsClient) ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result BigDataPoolResourceInfoListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolsClient.ListByWorkspace") + defer func() { + sc := -1 + if result.bdprilr.Response.Response != nil { + sc = result.bdprilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.BigDataPoolsClient", "ListByWorkspace", err.Error()) + } + + result.fn = client.listByWorkspaceNextResults + req, err := client.ListByWorkspacePreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "ListByWorkspace", nil, "Failure preparing request") + return + } + + resp, err := client.ListByWorkspaceSender(req) + if err != nil { + result.bdprilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "ListByWorkspace", resp, "Failure sending request") + return + } + + result.bdprilr, err = client.ListByWorkspaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "ListByWorkspace", resp, "Failure responding to request") + } + + return +} + +// ListByWorkspacePreparer prepares the ListByWorkspace request. 
+func (client BigDataPoolsClient) ListByWorkspacePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByWorkspaceSender sends the ListByWorkspace request. The method will close the +// http.Response Body if it receives an error. +func (client BigDataPoolsClient) ListByWorkspaceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByWorkspaceResponder handles the response to the ListByWorkspace request. The method always +// closes the http.Response Body. +func (client BigDataPoolsClient) ListByWorkspaceResponder(resp *http.Response) (result BigDataPoolResourceInfoListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByWorkspaceNextResults retrieves the next set of results, if any. +func (client BigDataPoolsClient) listByWorkspaceNextResults(ctx context.Context, lastResults BigDataPoolResourceInfoListResult) (result BigDataPoolResourceInfoListResult, err error) { + req, err := lastResults.bigDataPoolResourceInfoListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "listByWorkspaceNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByWorkspaceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "listByWorkspaceNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByWorkspaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "listByWorkspaceNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByWorkspaceComplete enumerates all values, automatically crossing page boundaries as required. +func (client BigDataPoolsClient) ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result BigDataPoolResourceInfoListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolsClient.ListByWorkspace") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByWorkspace(ctx, resourceGroupName, workspaceName) + return +} + +// Update patch a Big Data pool. 
+// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// bigDataPoolName - big Data pool name +// bigDataPoolPatchInfo - the updated Big Data pool properties +func (client BigDataPoolsClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string, bigDataPoolPatchInfo BigDataPoolPatchInfo) (result BigDataPoolResourceInfo, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.BigDataPoolsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, workspaceName, bigDataPoolName, bigDataPoolPatchInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client BigDataPoolsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string, bigDataPoolPatchInfo BigDataPoolPatchInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "bigDataPoolName": autorest.Encode("path", bigDataPoolName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}", pathParameters), + autorest.WithJSON(bigDataPoolPatchInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client BigDataPoolsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client BigDataPoolsClient) UpdateResponder(resp *http.Response) (result BigDataPoolResourceInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/client.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/client.go new file mode 100644 index 000000000000..a348ae174427 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/client.go @@ -0,0 +1,52 @@ +// Package synapse implements the Azure ARM Synapse service API version 2019-06-01-preview. +// +// Azure Synapse Analytics Management Client +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Synapse + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Synapse. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/ipfirewallrules.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/ipfirewallrules.go new file mode 100644 index 000000000000..81f5dabc3e7e --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/ipfirewallrules.go @@ -0,0 +1,435 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// IPFirewallRulesClient is the azure Synapse Analytics Management Client +type IPFirewallRulesClient struct { + BaseClient +} + +// NewIPFirewallRulesClient creates an instance of the IPFirewallRulesClient client. +func NewIPFirewallRulesClient(subscriptionID string) IPFirewallRulesClient { + return NewIPFirewallRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIPFirewallRulesClientWithBaseURI creates an instance of the IPFirewallRulesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewIPFirewallRulesClientWithBaseURI(baseURI string, subscriptionID string) IPFirewallRulesClient { + return IPFirewallRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a firewall rule +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// ruleName - the IP firewall rule name +// IPFirewallRuleInfo - IP firewall rule properties +func (client IPFirewallRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, IPFirewallRuleInfo IPFirewallRuleInfo) (result IPFirewallRulesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRulesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.IPFirewallRulesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, ruleName, IPFirewallRuleInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client IPFirewallRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, IPFirewallRuleInfo IPFirewallRuleInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}", pathParameters), + autorest.WithJSON(IPFirewallRuleInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
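+//
+// CreateOrUpdate is long-running: the returned IPFirewallRulesCreateOrUpdateFuture
+// wraps azure.Future. A minimal, illustrative sketch (not generated code) that
+// assumes a client configured with an authorizer, placeholder names, empty rule
+// properties, and the generated Result method on the future:
+//
+//   ctx := context.Background()
+//   client := synapse.NewIPFirewallRulesClient("<subscriptionID>")
+//   client.Authorizer = authorizer
+//
+//   rule := synapse.IPFirewallRuleInfo{ /* start/end IP address properties */ }
+//   future, err := client.CreateOrUpdate(ctx, "<resourceGroup>", "<workspace>", "<ruleName>", rule)
+//   if err != nil {
+//       // handle error
+//   }
+//   if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//       // handle polling error
+//   }
+//   created, err := future.Result(client) // IPFirewallRuleInfo once provisioning completes
+//   _ = created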
+func (client IPFirewallRulesClient) CreateOrUpdateSender(req *http.Request) (future IPFirewallRulesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IPFirewallRulesClient) CreateOrUpdateResponder(resp *http.Response) (result IPFirewallRuleInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a firewall rule +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// ruleName - the IP firewall rule name +func (client IPFirewallRulesClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string) (result IPFirewallRulesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRulesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.IPFirewallRulesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client IPFirewallRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client IPFirewallRulesClient) DeleteSender(req *http.Request) (future IPFirewallRulesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IPFirewallRulesClient) DeleteResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByWorkspace returns a list of firewall rules +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +func (client IPFirewallRulesClient) ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result IPFirewallRuleInfoListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRulesClient.ListByWorkspace") + defer func() { + sc := -1 + if result.ifrilr.Response.Response != nil { + sc = result.ifrilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.IPFirewallRulesClient", "ListByWorkspace", err.Error()) + } + + result.fn = client.listByWorkspaceNextResults + req, err := client.ListByWorkspacePreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "ListByWorkspace", nil, "Failure preparing request") + return + } + + resp, err := client.ListByWorkspaceSender(req) + if err != nil { + result.ifrilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "ListByWorkspace", resp, "Failure sending request") + return + } + + result.ifrilr, err = client.ListByWorkspaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "ListByWorkspace", resp, "Failure responding to request") + } + + return +} + +// ListByWorkspacePreparer prepares the ListByWorkspace request. +func (client IPFirewallRulesClient) ListByWorkspacePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByWorkspaceSender sends the ListByWorkspace request. The method will close the +// http.Response Body if it receives an error. +func (client IPFirewallRulesClient) ListByWorkspaceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByWorkspaceResponder handles the response to the ListByWorkspace request. The method always +// closes the http.Response Body. 
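+//
+// Results are paged. An illustrative sketch of walking the pages (not generated
+// code; assumes a configured IPFirewallRulesClient, placeholder names, the
+// generated NotDone/Values/NextWithContext helpers on
+// IPFirewallRuleInfoListResultPage, and a Name field on IPFirewallRuleInfo):
+//
+//   ctx := context.Background()
+//   page, err := client.ListByWorkspace(ctx, "<resourceGroup>", "<workspace>")
+//   if err != nil {
+//       // handle error
+//   }
+//   for page.NotDone() {
+//       for _, rule := range page.Values() {
+//           fmt.Println(*rule.Name)
+//       }
+//       if err = page.NextWithContext(ctx); err != nil {
+//           // handle paging error
+//           break
+//       }
+//   }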
+func (client IPFirewallRulesClient) ListByWorkspaceResponder(resp *http.Response) (result IPFirewallRuleInfoListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByWorkspaceNextResults retrieves the next set of results, if any. +func (client IPFirewallRulesClient) listByWorkspaceNextResults(ctx context.Context, lastResults IPFirewallRuleInfoListResult) (result IPFirewallRuleInfoListResult, err error) { + req, err := lastResults.iPFirewallRuleInfoListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "listByWorkspaceNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByWorkspaceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "listByWorkspaceNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByWorkspaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "listByWorkspaceNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByWorkspaceComplete enumerates all values, automatically crossing page boundaries as required. +func (client IPFirewallRulesClient) ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result IPFirewallRuleInfoListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRulesClient.ListByWorkspace") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByWorkspace(ctx, resourceGroupName, workspaceName) + return +} + +// ReplaceAll replaces firewall rules +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// request - replace all IP firewall rules request +func (client IPFirewallRulesClient) ReplaceAll(ctx context.Context, resourceGroupName string, workspaceName string, request ReplaceAllIPFirewallRulesRequest) (result IPFirewallRulesReplaceAllFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRulesClient.ReplaceAll") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.IPFirewallRulesClient", "ReplaceAll", err.Error()) + } + + req, err := client.ReplaceAllPreparer(ctx, resourceGroupName, workspaceName, request) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "ReplaceAll", nil, "Failure preparing request") + return + } + + result, err = client.ReplaceAllSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesClient", "ReplaceAll", result.Response(), "Failure sending request") + return + } + + return +} + +// ReplaceAllPreparer prepares the ReplaceAll request. +func (client IPFirewallRulesClient) ReplaceAllPreparer(ctx context.Context, resourceGroupName string, workspaceName string, request ReplaceAllIPFirewallRulesRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/replaceAllIpFirewallRules", pathParameters), + autorest.WithJSON(request), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReplaceAllSender sends the ReplaceAll request. The method will close the +// http.Response Body if it receives an error. +func (client IPFirewallRulesClient) ReplaceAllSender(req *http.Request) (future IPFirewallRulesReplaceAllFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ReplaceAllResponder handles the response to the ReplaceAll request. The method always +// closes the http.Response Body. 
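+//
+// ReplaceAll is long-running and follows the same future pattern shown for
+// CreateOrUpdate above; an abbreviated, illustrative sketch (placeholder
+// request contents, generated Result method on the future assumed):
+//
+//   req := synapse.ReplaceAllIPFirewallRulesRequest{ /* desired rule set */ }
+//   future, err := client.ReplaceAll(ctx, "<resourceGroup>", "<workspace>", req)
+//   if err == nil {
+//       err = future.WaitForCompletionRef(ctx, client.Client)
+//   }
+//   if err == nil {
+//       opResp, rerr := future.Result(client) // ReplaceAllFirewallRulesOperationResponse
+//       _, _ = opResp, rerr
+//   }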
+func (client IPFirewallRulesClient) ReplaceAllResponder(resp *http.Response) (result ReplaceAllFirewallRulesOperationResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/models.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/models.go new file mode 100644 index 000000000000..36967508994b --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/models.go @@ -0,0 +1,5920 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "github.com/satori/go.uuid" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse" + +// ActualState enumerates the values for actual state. +type ActualState string + +const ( + // Disabled ... + Disabled ActualState = "Disabled" + // Disabling ... + Disabling ActualState = "Disabling" + // Enabled ... + Enabled ActualState = "Enabled" + // Enabling ... + Enabling ActualState = "Enabling" + // Unknown ... + Unknown ActualState = "Unknown" +) + +// PossibleActualStateValues returns an array of possible values for the ActualState const type. +func PossibleActualStateValues() []ActualState { + return []ActualState{Disabled, Disabling, Enabled, Enabling, Unknown} +} + +// BlobAuditingPolicyState enumerates the values for blob auditing policy state. +type BlobAuditingPolicyState string + +const ( + // BlobAuditingPolicyStateDisabled ... + BlobAuditingPolicyStateDisabled BlobAuditingPolicyState = "Disabled" + // BlobAuditingPolicyStateEnabled ... + BlobAuditingPolicyStateEnabled BlobAuditingPolicyState = "Enabled" +) + +// PossibleBlobAuditingPolicyStateValues returns an array of possible values for the BlobAuditingPolicyState const type. +func PossibleBlobAuditingPolicyStateValues() []BlobAuditingPolicyState { + return []BlobAuditingPolicyState{BlobAuditingPolicyStateDisabled, BlobAuditingPolicyStateEnabled} +} + +// ColumnDataType enumerates the values for column data type. +type ColumnDataType string + +const ( + // Bigint ... + Bigint ColumnDataType = "bigint" + // Binary ... + Binary ColumnDataType = "binary" + // Bit ... + Bit ColumnDataType = "bit" + // Char ... 
+ Char ColumnDataType = "char" + // Date ... + Date ColumnDataType = "date" + // Datetime ... + Datetime ColumnDataType = "datetime" + // Datetime2 ... + Datetime2 ColumnDataType = "datetime2" + // Datetimeoffset ... + Datetimeoffset ColumnDataType = "datetimeoffset" + // Decimal ... + Decimal ColumnDataType = "decimal" + // Float ... + Float ColumnDataType = "float" + // Geography ... + Geography ColumnDataType = "geography" + // Geometry ... + Geometry ColumnDataType = "geometry" + // Hierarchyid ... + Hierarchyid ColumnDataType = "hierarchyid" + // Image ... + Image ColumnDataType = "image" + // Int ... + Int ColumnDataType = "int" + // Money ... + Money ColumnDataType = "money" + // Nchar ... + Nchar ColumnDataType = "nchar" + // Ntext ... + Ntext ColumnDataType = "ntext" + // Numeric ... + Numeric ColumnDataType = "numeric" + // Nvarchar ... + Nvarchar ColumnDataType = "nvarchar" + // Real ... + Real ColumnDataType = "real" + // Smalldatetime ... + Smalldatetime ColumnDataType = "smalldatetime" + // Smallint ... + Smallint ColumnDataType = "smallint" + // Smallmoney ... + Smallmoney ColumnDataType = "smallmoney" + // SQLVariant ... + SQLVariant ColumnDataType = "sql_variant" + // Sysname ... + Sysname ColumnDataType = "sysname" + // Text ... + Text ColumnDataType = "text" + // Time ... + Time ColumnDataType = "time" + // Timestamp ... + Timestamp ColumnDataType = "timestamp" + // Tinyint ... + Tinyint ColumnDataType = "tinyint" + // Uniqueidentifier ... + Uniqueidentifier ColumnDataType = "uniqueidentifier" + // Varbinary ... + Varbinary ColumnDataType = "varbinary" + // Varchar ... + Varchar ColumnDataType = "varchar" + // XML ... + XML ColumnDataType = "xml" +) + +// PossibleColumnDataTypeValues returns an array of possible values for the ColumnDataType const type. +func PossibleColumnDataTypeValues() []ColumnDataType { + return []ColumnDataType{Bigint, Binary, Bit, Char, Date, Datetime, Datetime2, Datetimeoffset, Decimal, Float, Geography, Geometry, Hierarchyid, Image, Int, Money, Nchar, Ntext, Numeric, Nvarchar, Real, Smalldatetime, Smallint, Smallmoney, SQLVariant, Sysname, Text, Time, Timestamp, Tinyint, Uniqueidentifier, Varbinary, Varchar, XML} +} + +// DesiredState enumerates the values for desired state. +type DesiredState string + +const ( + // DesiredStateDisabled ... + DesiredStateDisabled DesiredState = "Disabled" + // DesiredStateEnabled ... + DesiredStateEnabled DesiredState = "Enabled" +) + +// PossibleDesiredStateValues returns an array of possible values for the DesiredState const type. +func PossibleDesiredStateValues() []DesiredState { + return []DesiredState{DesiredStateDisabled, DesiredStateEnabled} +} + +// GeoBackupPolicyState enumerates the values for geo backup policy state. +type GeoBackupPolicyState string + +const ( + // GeoBackupPolicyStateDisabled ... + GeoBackupPolicyStateDisabled GeoBackupPolicyState = "Disabled" + // GeoBackupPolicyStateEnabled ... + GeoBackupPolicyStateEnabled GeoBackupPolicyState = "Enabled" +) + +// PossibleGeoBackupPolicyStateValues returns an array of possible values for the GeoBackupPolicyState const type. +func PossibleGeoBackupPolicyStateValues() []GeoBackupPolicyState { + return []GeoBackupPolicyState{GeoBackupPolicyStateDisabled, GeoBackupPolicyStateEnabled} +} + +// ManagementOperationState enumerates the values for management operation state. +type ManagementOperationState string + +const ( + // CancelInProgress ... + CancelInProgress ManagementOperationState = "CancelInProgress" + // Cancelled ... 
+ Cancelled ManagementOperationState = "Cancelled" + // Failed ... + Failed ManagementOperationState = "Failed" + // InProgress ... + InProgress ManagementOperationState = "InProgress" + // Pending ... + Pending ManagementOperationState = "Pending" + // Succeeded ... + Succeeded ManagementOperationState = "Succeeded" +) + +// PossibleManagementOperationStateValues returns an array of possible values for the ManagementOperationState const type. +func PossibleManagementOperationStateValues() []ManagementOperationState { + return []ManagementOperationState{CancelInProgress, Cancelled, Failed, InProgress, Pending, Succeeded} +} + +// NodeSize enumerates the values for node size. +type NodeSize string + +const ( + // Large ... + Large NodeSize = "Large" + // Medium ... + Medium NodeSize = "Medium" + // None ... + None NodeSize = "None" + // Small ... + Small NodeSize = "Small" +) + +// PossibleNodeSizeValues returns an array of possible values for the NodeSize const type. +func PossibleNodeSizeValues() []NodeSize { + return []NodeSize{Large, Medium, None, Small} +} + +// NodeSizeFamily enumerates the values for node size family. +type NodeSizeFamily string + +const ( + // NodeSizeFamilyMemoryOptimized ... + NodeSizeFamilyMemoryOptimized NodeSizeFamily = "MemoryOptimized" + // NodeSizeFamilyNone ... + NodeSizeFamilyNone NodeSizeFamily = "None" +) + +// PossibleNodeSizeFamilyValues returns an array of possible values for the NodeSizeFamily const type. +func PossibleNodeSizeFamilyValues() []NodeSizeFamily { + return []NodeSizeFamily{NodeSizeFamilyMemoryOptimized, NodeSizeFamilyNone} +} + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // OperationStatusCanceled ... + OperationStatusCanceled OperationStatus = "Canceled" + // OperationStatusFailed ... + OperationStatusFailed OperationStatus = "Failed" + // OperationStatusInProgress ... + OperationStatusInProgress OperationStatus = "InProgress" + // OperationStatusSucceeded ... + OperationStatusSucceeded OperationStatus = "Succeeded" +) + +// PossibleOperationStatusValues returns an array of possible values for the OperationStatus const type. +func PossibleOperationStatusValues() []OperationStatus { + return []OperationStatus{OperationStatusCanceled, OperationStatusFailed, OperationStatusInProgress, OperationStatusSucceeded} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateDeleteError ... + ProvisioningStateDeleteError ProvisioningState = "DeleteError" + // ProvisioningStateDeleting ... + ProvisioningStateDeleting ProvisioningState = "Deleting" + // ProvisioningStateFailed ... + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateProvisioning ... + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + // ProvisioningStateSucceeded ... + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{ProvisioningStateDeleteError, ProvisioningStateDeleting, ProvisioningStateFailed, ProvisioningStateProvisioning, ProvisioningStateSucceeded} +} + +// QueryAggregationFunction enumerates the values for query aggregation function. +type QueryAggregationFunction string + +const ( + // Avg ... + Avg QueryAggregationFunction = "avg" + // Max ... 
+ Max QueryAggregationFunction = "max" + // Min ... + Min QueryAggregationFunction = "min" + // Sum ... + Sum QueryAggregationFunction = "sum" +) + +// PossibleQueryAggregationFunctionValues returns an array of possible values for the QueryAggregationFunction const type. +func PossibleQueryAggregationFunctionValues() []QueryAggregationFunction { + return []QueryAggregationFunction{Avg, Max, Min, Sum} +} + +// QueryExecutionType enumerates the values for query execution type. +type QueryExecutionType string + +const ( + // Aborted ... + Aborted QueryExecutionType = "aborted" + // Any ... + Any QueryExecutionType = "any" + // Exception ... + Exception QueryExecutionType = "exception" + // Irregular ... + Irregular QueryExecutionType = "irregular" + // Regular ... + Regular QueryExecutionType = "regular" +) + +// PossibleQueryExecutionTypeValues returns an array of possible values for the QueryExecutionType const type. +func PossibleQueryExecutionTypeValues() []QueryExecutionType { + return []QueryExecutionType{Aborted, Any, Exception, Irregular, Regular} +} + +// QueryMetricUnit enumerates the values for query metric unit. +type QueryMetricUnit string + +const ( + // KB ... + KB QueryMetricUnit = "KB" + // Microseconds ... + Microseconds QueryMetricUnit = "microseconds" + // Percentage ... + Percentage QueryMetricUnit = "percentage" +) + +// PossibleQueryMetricUnitValues returns an array of possible values for the QueryMetricUnit const type. +func PossibleQueryMetricUnitValues() []QueryMetricUnit { + return []QueryMetricUnit{KB, Microseconds, Percentage} +} + +// QueryObservedMetricType enumerates the values for query observed metric type. +type QueryObservedMetricType string + +const ( + // CPU ... + CPU QueryObservedMetricType = "cpu" + // Duration ... + Duration QueryObservedMetricType = "duration" + // ExecutionCount ... + ExecutionCount QueryObservedMetricType = "executionCount" + // Io ... + Io QueryObservedMetricType = "io" + // Logio ... + Logio QueryObservedMetricType = "logio" +) + +// PossibleQueryObservedMetricTypeValues returns an array of possible values for the QueryObservedMetricType const type. +func PossibleQueryObservedMetricTypeValues() []QueryObservedMetricType { + return []QueryObservedMetricType{CPU, Duration, ExecutionCount, Io, Logio} +} + +// ReplicationRole enumerates the values for replication role. +type ReplicationRole string + +const ( + // Copy ... + Copy ReplicationRole = "Copy" + // NonReadableSecondary ... + NonReadableSecondary ReplicationRole = "NonReadableSecondary" + // Primary ... + Primary ReplicationRole = "Primary" + // Secondary ... + Secondary ReplicationRole = "Secondary" + // Source ... + Source ReplicationRole = "Source" +) + +// PossibleReplicationRoleValues returns an array of possible values for the ReplicationRole const type. +func PossibleReplicationRoleValues() []ReplicationRole { + return []ReplicationRole{Copy, NonReadableSecondary, Primary, Secondary, Source} +} + +// ReplicationState enumerates the values for replication state. +type ReplicationState string + +const ( + // CATCHUP ... + CATCHUP ReplicationState = "CATCH_UP" + // PENDING ... + PENDING ReplicationState = "PENDING" + // SEEDING ... + SEEDING ReplicationState = "SEEDING" + // SUSPENDED ... + SUSPENDED ReplicationState = "SUSPENDED" +) + +// PossibleReplicationStateValues returns an array of possible values for the ReplicationState const type. 
+func PossibleReplicationStateValues() []ReplicationState { + return []ReplicationState{CATCHUP, PENDING, SEEDING, SUSPENDED} +} + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // ResourceIdentityTypeNone ... + ResourceIdentityTypeNone ResourceIdentityType = "None" + // ResourceIdentityTypeSystemAssigned ... + ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned" +) + +// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned} +} + +// RestorePointType enumerates the values for restore point type. +type RestorePointType string + +const ( + // CONTINUOUS ... + CONTINUOUS RestorePointType = "CONTINUOUS" + // DISCRETE ... + DISCRETE RestorePointType = "DISCRETE" +) + +// PossibleRestorePointTypeValues returns an array of possible values for the RestorePointType const type. +func PossibleRestorePointTypeValues() []RestorePointType { + return []RestorePointType{CONTINUOUS, DISCRETE} +} + +// SecurityAlertPolicyState enumerates the values for security alert policy state. +type SecurityAlertPolicyState string + +const ( + // SecurityAlertPolicyStateDisabled ... + SecurityAlertPolicyStateDisabled SecurityAlertPolicyState = "Disabled" + // SecurityAlertPolicyStateEnabled ... + SecurityAlertPolicyStateEnabled SecurityAlertPolicyState = "Enabled" + // SecurityAlertPolicyStateNew ... + SecurityAlertPolicyStateNew SecurityAlertPolicyState = "New" +) + +// PossibleSecurityAlertPolicyStateValues returns an array of possible values for the SecurityAlertPolicyState const type. +func PossibleSecurityAlertPolicyStateValues() []SecurityAlertPolicyState { + return []SecurityAlertPolicyState{SecurityAlertPolicyStateDisabled, SecurityAlertPolicyStateEnabled, SecurityAlertPolicyStateNew} +} + +// TransparentDataEncryptionStatus enumerates the values for transparent data encryption status. +type TransparentDataEncryptionStatus string + +const ( + // TransparentDataEncryptionStatusDisabled ... + TransparentDataEncryptionStatusDisabled TransparentDataEncryptionStatus = "Disabled" + // TransparentDataEncryptionStatusEnabled ... + TransparentDataEncryptionStatusEnabled TransparentDataEncryptionStatus = "Enabled" +) + +// PossibleTransparentDataEncryptionStatusValues returns an array of possible values for the TransparentDataEncryptionStatus const type. +func PossibleTransparentDataEncryptionStatusValues() []TransparentDataEncryptionStatus { + return []TransparentDataEncryptionStatus{TransparentDataEncryptionStatusDisabled, TransparentDataEncryptionStatusEnabled} +} + +// VulnerabilityAssessmentPolicyBaselineName enumerates the values for vulnerability assessment policy baseline +// name. +type VulnerabilityAssessmentPolicyBaselineName string + +const ( + // Default ... + Default VulnerabilityAssessmentPolicyBaselineName = "default" + // Master ... + Master VulnerabilityAssessmentPolicyBaselineName = "master" +) + +// PossibleVulnerabilityAssessmentPolicyBaselineNameValues returns an array of possible values for the VulnerabilityAssessmentPolicyBaselineName const type. 
+func PossibleVulnerabilityAssessmentPolicyBaselineNameValues() []VulnerabilityAssessmentPolicyBaselineName { + return []VulnerabilityAssessmentPolicyBaselineName{Default, Master} +} + +// VulnerabilityAssessmentScanState enumerates the values for vulnerability assessment scan state. +type VulnerabilityAssessmentScanState string + +const ( + // VulnerabilityAssessmentScanStateFailed ... + VulnerabilityAssessmentScanStateFailed VulnerabilityAssessmentScanState = "Failed" + // VulnerabilityAssessmentScanStateFailedToRun ... + VulnerabilityAssessmentScanStateFailedToRun VulnerabilityAssessmentScanState = "FailedToRun" + // VulnerabilityAssessmentScanStateInProgress ... + VulnerabilityAssessmentScanStateInProgress VulnerabilityAssessmentScanState = "InProgress" + // VulnerabilityAssessmentScanStatePassed ... + VulnerabilityAssessmentScanStatePassed VulnerabilityAssessmentScanState = "Passed" +) + +// PossibleVulnerabilityAssessmentScanStateValues returns an array of possible values for the VulnerabilityAssessmentScanState const type. +func PossibleVulnerabilityAssessmentScanStateValues() []VulnerabilityAssessmentScanState { + return []VulnerabilityAssessmentScanState{VulnerabilityAssessmentScanStateFailed, VulnerabilityAssessmentScanStateFailedToRun, VulnerabilityAssessmentScanStateInProgress, VulnerabilityAssessmentScanStatePassed} +} + +// VulnerabilityAssessmentScanTriggerType enumerates the values for vulnerability assessment scan trigger type. +type VulnerabilityAssessmentScanTriggerType string + +const ( + // OnDemand ... + OnDemand VulnerabilityAssessmentScanTriggerType = "OnDemand" + // Recurring ... + Recurring VulnerabilityAssessmentScanTriggerType = "Recurring" +) + +// PossibleVulnerabilityAssessmentScanTriggerTypeValues returns an array of possible values for the VulnerabilityAssessmentScanTriggerType const type. +func PossibleVulnerabilityAssessmentScanTriggerTypeValues() []VulnerabilityAssessmentScanTriggerType { + return []VulnerabilityAssessmentScanTriggerType{OnDemand, Recurring} +} + +// AadAdminProperties workspace active directory administrator properties +type AadAdminProperties struct { + // TenantID - Tenant ID of the workspace active directory administrator + TenantID *string `json:"tenantId,omitempty"` + // Login - Login of the workspace active directory administrator + Login *string `json:"login,omitempty"` + // AdministratorType - Workspace active directory administrator type + AdministratorType *string `json:"administratorType,omitempty"` + // Sid - Object ID of the workspace active directory administrator + Sid *string `json:"sid,omitempty"` +} + +// AutoPauseProperties auto-pausing properties of a Big Data pool powered by Apache Spark +type AutoPauseProperties struct { + // DelayInMinutes - Number of minutes of idle time before the Big Data pool is automatically paused. + DelayInMinutes *int32 `json:"delayInMinutes,omitempty"` + // Enabled - Whether auto-pausing is enabled for the Big Data pool. + Enabled *bool `json:"enabled,omitempty"` +} + +// AutoScaleProperties auto-scaling properties of a Big Data pool powered by Apache Spark +type AutoScaleProperties struct { + // MinNodeCount - The minimum number of nodes the Big Data pool can support. + MinNodeCount *int32 `json:"minNodeCount,omitempty"` + // Enabled - Whether automatic scaling is enabled for the Big Data pool. + Enabled *bool `json:"enabled,omitempty"` + // MaxNodeCount - The maximum number of nodes the Big Data pool can support. 
+ MaxNodeCount *int32 `json:"maxNodeCount,omitempty"` +} + +// AvailableRpOperation an operation that is available in this resource provider +type AvailableRpOperation struct { + // Display - Display properties of the operation + Display *AvailableRpOperationDisplayInfo `json:"display,omitempty"` + // IsDataAction - Whether this operation is a data action + IsDataAction *string `json:"isDataAction,omitempty"` + // Name - Operation name + Name *string `json:"name,omitempty"` + // OperationMetaPropertyInfo - Operation properties + *OperationMetaPropertyInfo `json:"properties,omitempty"` + // Origin - Operation origin + Origin *string `json:"origin,omitempty"` +} + +// MarshalJSON is the custom marshaler for AvailableRpOperation. +func (aro AvailableRpOperation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if aro.Display != nil { + objectMap["display"] = aro.Display + } + if aro.IsDataAction != nil { + objectMap["isDataAction"] = aro.IsDataAction + } + if aro.Name != nil { + objectMap["name"] = aro.Name + } + if aro.OperationMetaPropertyInfo != nil { + objectMap["properties"] = aro.OperationMetaPropertyInfo + } + if aro.Origin != nil { + objectMap["origin"] = aro.Origin + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AvailableRpOperation struct. +func (aro *AvailableRpOperation) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "display": + if v != nil { + var display AvailableRpOperationDisplayInfo + err = json.Unmarshal(*v, &display) + if err != nil { + return err + } + aro.Display = &display + } + case "isDataAction": + if v != nil { + var isDataAction string + err = json.Unmarshal(*v, &isDataAction) + if err != nil { + return err + } + aro.IsDataAction = &isDataAction + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + aro.Name = &name + } + case "properties": + if v != nil { + var operationMetaPropertyInfo OperationMetaPropertyInfo + err = json.Unmarshal(*v, &operationMetaPropertyInfo) + if err != nil { + return err + } + aro.OperationMetaPropertyInfo = &operationMetaPropertyInfo + } + case "origin": + if v != nil { + var origin string + err = json.Unmarshal(*v, &origin) + if err != nil { + return err + } + aro.Origin = &origin + } + } + } + + return nil +} + +// AvailableRpOperationDisplayInfo description of an available operation +type AvailableRpOperationDisplayInfo struct { + // Description - Operation description + Description *string `json:"description,omitempty"` + // Resource - Resource type + Resource *string `json:"resource,omitempty"` + // Provider - Resource provider name + Provider *string `json:"provider,omitempty"` + // Operation - Operation name + Operation *string `json:"operation,omitempty"` +} + +// AzureEntityResource the resource model definition for a Azure Resource Manager resource with an etag. +type AzureEntityResource struct { + // Etag - READ-ONLY; Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. 
Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// BigDataPoolPatchInfo properties patch for a Big Data pool +type BigDataPoolPatchInfo struct { + // Tags - Updated tags for the Big Data pool + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for BigDataPoolPatchInfo. +func (bdppi BigDataPoolPatchInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bdppi.Tags != nil { + objectMap["tags"] = bdppi.Tags + } + return json.Marshal(objectMap) +} + +// BigDataPoolResourceInfo a Big Data pool +type BigDataPoolResourceInfo struct { + autorest.Response `json:"-"` + // BigDataPoolResourceProperties - Big Data pool properties + *BigDataPoolResourceProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for BigDataPoolResourceInfo. +func (bdpri BigDataPoolResourceInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bdpri.BigDataPoolResourceProperties != nil { + objectMap["properties"] = bdpri.BigDataPoolResourceProperties + } + if bdpri.Tags != nil { + objectMap["tags"] = bdpri.Tags + } + if bdpri.Location != nil { + objectMap["location"] = bdpri.Location + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BigDataPoolResourceInfo struct. 
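+//
+// The embedded BigDataPoolResourceProperties is flattened into and out of the
+// "properties" envelope on the wire. An illustrative round-trip sketch (not
+// generated code; assumes encoding/json and the pointer helpers in
+// github.com/Azure/go-autorest/autorest/to):
+//
+//   pool := synapse.BigDataPoolResourceInfo{
+//       Location: to.StringPtr("westus2"),
+//       BigDataPoolResourceProperties: &synapse.BigDataPoolResourceProperties{
+//           NodeCount:    to.Int32Ptr(3),
+//           SparkVersion: to.StringPtr("2.4"),
+//       },
+//   }
+//   b, _ := json.Marshal(pool)
+//   // {"location":"westus2","properties":{"nodeCount":3,"sparkVersion":"2.4"}}
+//
+//   var decoded synapse.BigDataPoolResourceInfo
+//   _ = json.Unmarshal(b, &decoded)
+//   _ = *decoded.NodeCount // fields are promoted from the embedded properties struct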
+func (bdpri *BigDataPoolResourceInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var bigDataPoolResourceProperties BigDataPoolResourceProperties + err = json.Unmarshal(*v, &bigDataPoolResourceProperties) + if err != nil { + return err + } + bdpri.BigDataPoolResourceProperties = &bigDataPoolResourceProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + bdpri.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + bdpri.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bdpri.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bdpri.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + bdpri.Type = &typeVar + } + } + } + + return nil +} + +// BigDataPoolResourceInfoListResult collection of Big Data pool information +type BigDataPoolResourceInfoListResult struct { + autorest.Response `json:"-"` + // NextLink - Link to the next page of results + NextLink *string `json:"nextLink,omitempty"` + // Value - List of Big Data pools + Value *[]BigDataPoolResourceInfo `json:"value,omitempty"` +} + +// BigDataPoolResourceInfoListResultIterator provides access to a complete listing of +// BigDataPoolResourceInfo values. +type BigDataPoolResourceInfoListResultIterator struct { + i int + page BigDataPoolResourceInfoListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *BigDataPoolResourceInfoListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolResourceInfoListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *BigDataPoolResourceInfoListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter BigDataPoolResourceInfoListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter BigDataPoolResourceInfoListResultIterator) Response() BigDataPoolResourceInfoListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
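+//
+// An illustrative sketch of consuming the iterator returned by
+// BigDataPoolsClient.ListByWorkspaceComplete (not generated code; assumes a
+// configured BigDataPoolsClient and placeholder names):
+//
+//   ctx := context.Background()
+//   iter, err := client.ListByWorkspaceComplete(ctx, "<resourceGroup>", "<workspace>")
+//   if err != nil {
+//       // handle error
+//   }
+//   for iter.NotDone() {
+//       pool := iter.Value()
+//       fmt.Println(*pool.Name)
+//       if err = iter.NextWithContext(ctx); err != nil {
+//           // handle paging error
+//           break
+//       }
+//   }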
+func (iter BigDataPoolResourceInfoListResultIterator) Value() BigDataPoolResourceInfo { + if !iter.page.NotDone() { + return BigDataPoolResourceInfo{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the BigDataPoolResourceInfoListResultIterator type. +func NewBigDataPoolResourceInfoListResultIterator(page BigDataPoolResourceInfoListResultPage) BigDataPoolResourceInfoListResultIterator { + return BigDataPoolResourceInfoListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (bdprilr BigDataPoolResourceInfoListResult) IsEmpty() bool { + return bdprilr.Value == nil || len(*bdprilr.Value) == 0 +} + +// bigDataPoolResourceInfoListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (bdprilr BigDataPoolResourceInfoListResult) bigDataPoolResourceInfoListResultPreparer(ctx context.Context) (*http.Request, error) { + if bdprilr.NextLink == nil || len(to.String(bdprilr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(bdprilr.NextLink))) +} + +// BigDataPoolResourceInfoListResultPage contains a page of BigDataPoolResourceInfo values. +type BigDataPoolResourceInfoListResultPage struct { + fn func(context.Context, BigDataPoolResourceInfoListResult) (BigDataPoolResourceInfoListResult, error) + bdprilr BigDataPoolResourceInfoListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *BigDataPoolResourceInfoListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BigDataPoolResourceInfoListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.bdprilr) + if err != nil { + return err + } + page.bdprilr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *BigDataPoolResourceInfoListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page BigDataPoolResourceInfoListResultPage) NotDone() bool { + return !page.bdprilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page BigDataPoolResourceInfoListResultPage) Response() BigDataPoolResourceInfoListResult { + return page.bdprilr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page BigDataPoolResourceInfoListResultPage) Values() []BigDataPoolResourceInfo { + if page.bdprilr.IsEmpty() { + return nil + } + return *page.bdprilr.Value +} + +// Creates a new instance of the BigDataPoolResourceInfoListResultPage type. 
+func NewBigDataPoolResourceInfoListResultPage(getNextPage func(context.Context, BigDataPoolResourceInfoListResult) (BigDataPoolResourceInfoListResult, error)) BigDataPoolResourceInfoListResultPage { + return BigDataPoolResourceInfoListResultPage{fn: getNextPage} +} + +// BigDataPoolResourceProperties properties of a Big Data pool powered by Apache Spark +type BigDataPoolResourceProperties struct { + // ProvisioningState - The state of the Big Data pool. + ProvisioningState *string `json:"provisioningState,omitempty"` + // AutoScale - Auto-scaling properties + AutoScale *AutoScaleProperties `json:"autoScale,omitempty"` + // CreationDate - The time when the Big Data pool was created. + CreationDate *date.Time `json:"creationDate,omitempty"` + // AutoPause - Auto-pausing properties + AutoPause *AutoPauseProperties `json:"autoPause,omitempty"` + // SparkEventsFolder - The Spark events folder + SparkEventsFolder *string `json:"sparkEventsFolder,omitempty"` + // NodeCount - The number of nodes in the Big Data pool. + NodeCount *int32 `json:"nodeCount,omitempty"` + // LibraryRequirements - Library version requirements + LibraryRequirements *LibraryRequirements `json:"libraryRequirements,omitempty"` + // SparkVersion - The Apache Spark version. + SparkVersion *string `json:"sparkVersion,omitempty"` + // DefaultSparkLogFolder - The default folder where Spark logs will be written. + DefaultSparkLogFolder *string `json:"defaultSparkLogFolder,omitempty"` + // NodeSize - The level of compute power that each node in the Big Data pool has. Possible values include: 'None', 'Small', 'Medium', 'Large' + NodeSize NodeSize `json:"nodeSize,omitempty"` + // NodeSizeFamily - The kind of nodes that the Big Data pool provides. Possible values include: 'NodeSizeFamilyNone', 'NodeSizeFamilyMemoryOptimized' + NodeSizeFamily NodeSizeFamily `json:"nodeSizeFamily,omitempty"` +} + +// BigDataPoolsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type BigDataPoolsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *BigDataPoolsCreateOrUpdateFuture) Result(client BigDataPoolsClient) (bdpri BigDataPoolResourceInfo, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.BigDataPoolsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if bdpri.Response.Response, err = future.GetResult(sender); err == nil && bdpri.Response.Response.StatusCode != http.StatusNoContent { + bdpri, err = client.CreateOrUpdateResponder(bdpri.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsCreateOrUpdateFuture", "Result", bdpri.Response.Response, "Failure responding to request") + } + } + return +} + +// BigDataPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type BigDataPoolsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
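+//
+// An illustrative sketch of blocking on the delete future (not generated code;
+// assumes a configured BigDataPoolsClient, placeholder names, and that the
+// client exposes the generated Delete operation returning this future):
+//
+//   ctx := context.Background()
+//   future, err := client.Delete(ctx, "<resourceGroup>", "<workspace>", "<bigDataPool>")
+//   if err != nil {
+//       // handle error
+//   }
+//   if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//       // handle polling error
+//   }
+//   so, err := future.Result(client) // SetObject wrapping the final service response
+//   _ = so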
+func (future *BigDataPoolsDeleteFuture) Result(client BigDataPoolsClient) (so SetObject, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.BigDataPoolsDeleteFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if so.Response.Response, err = future.GetResult(sender); err == nil && so.Response.Response.StatusCode != http.StatusNoContent { + so, err = client.DeleteResponder(so.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.BigDataPoolsDeleteFuture", "Result", so.Response.Response, "Failure responding to request") + } + } + return +} + +// CheckNameAvailabilityRequest a request about whether a workspace name is available +type CheckNameAvailabilityRequest struct { + // Name - Workspace name + Name *string `json:"name,omitempty"` + // Type - Type: workspace + Type *string `json:"type,omitempty"` +} + +// CheckNameAvailabilityResponse a response saying whether the workspace name is available +type CheckNameAvailabilityResponse struct { + autorest.Response `json:"-"` + // Message - Validation message + Message *string `json:"message,omitempty"` + // Available - Whether the workspace name is available + Available *bool `json:"available,omitempty"` + // Reason - Reason the workspace name is or is not available + Reason *string `json:"reason,omitempty"` + // Name - Workspace name + Name *string `json:"name,omitempty"` +} + +// CreateSQLPoolRestorePointDefinition contains the information necessary to perform a create Sql pool +// restore point operation. +type CreateSQLPoolRestorePointDefinition struct { + // RestorePointLabel - The restore point label to apply + RestorePointLabel *string `json:"restorePointLabel,omitempty"` +} + +// DataLakeStorageAccountDetails details of the data lake storage account associated with the workspace +type DataLakeStorageAccountDetails struct { + // AccountURL - Account URL + AccountURL *string `json:"accountUrl,omitempty"` + // Filesystem - Filesystem name + Filesystem *string `json:"filesystem,omitempty"` +} + +// DataWarehouseUserActivities user activities of a data warehouse +type DataWarehouseUserActivities struct { + autorest.Response `json:"-"` + // DataWarehouseUserActivitiesProperties - Resource properties. + *DataWarehouseUserActivitiesProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DataWarehouseUserActivities. 
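
// A small sketch of building the name-availability request defined above; the
// workspace name and resource type string are illustrative values, and to.StringPtr
// comes from the go-autorest "to" helpers already used in this file.
func exampleNameAvailabilityRequest() CheckNameAvailabilityRequest {
	return CheckNameAvailabilityRequest{
		Name: to.StringPtr("contoso-synapse-ws"),           // candidate workspace name
		Type: to.StringPtr("Microsoft.Synapse/workspaces"), // resource type being checked
	}
}

// exampleNameIsFree reads the response defensively, since every field is a pointer.
func exampleNameIsFree(resp CheckNameAvailabilityResponse) bool {
	return resp.Available != nil && *resp.Available
}
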
+func (dwua DataWarehouseUserActivities) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dwua.DataWarehouseUserActivitiesProperties != nil { + objectMap["properties"] = dwua.DataWarehouseUserActivitiesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DataWarehouseUserActivities struct. +func (dwua *DataWarehouseUserActivities) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var dataWarehouseUserActivitiesProperties DataWarehouseUserActivitiesProperties + err = json.Unmarshal(*v, &dataWarehouseUserActivitiesProperties) + if err != nil { + return err + } + dwua.DataWarehouseUserActivitiesProperties = &dataWarehouseUserActivitiesProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + dwua.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dwua.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + dwua.Type = &typeVar + } + } + } + + return nil +} + +// DataWarehouseUserActivitiesProperties user activities of a data warehouse. This currently includes the +// count of running or suspended queries. For more information, please view the sys.dm_pdw_exec_requests +// dynamic management view (DMV). +type DataWarehouseUserActivitiesProperties struct { + // ActiveQueriesCount - READ-ONLY; Count of running and suspended queries. + ActiveQueriesCount *int32 `json:"activeQueriesCount,omitempty"` +} + +// ErrorAdditionalInfo the resource management error additional info. +type ErrorAdditionalInfo struct { + // Type - READ-ONLY; The additional info type. + Type *string `json:"type,omitempty"` + // Info - READ-ONLY; The additional info. + Info interface{} `json:"info,omitempty"` +} + +// ErrorContract contains details when the response code indicates an error. +type ErrorContract struct { + // Error - The error details. + Error *ErrorResponse `json:"error,omitempty"` +} + +// ErrorDetail error details +type ErrorDetail struct { + // Message - Error message + Message *string `json:"message,omitempty"` + // Code - Error code + Code *string `json:"code,omitempty"` + // Target - Error target + Target *string `json:"target,omitempty"` +} + +// ErrorResponse the resource management error response. +type ErrorResponse struct { + // Code - READ-ONLY; The error code. + Code *string `json:"code,omitempty"` + // Message - READ-ONLY; The error message. + Message *string `json:"message,omitempty"` + // Target - READ-ONLY; The error target. + Target *string `json:"target,omitempty"` + // Details - READ-ONLY; The error details. + Details *[]ErrorResponse `json:"details,omitempty"` + // AdditionalInfo - READ-ONLY; The error additional info. + AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"` +} + +// GeoBackupPolicy a database geo backup policy. +type GeoBackupPolicy struct { + autorest.Response `json:"-"` + // GeoBackupPolicyProperties - The properties of the geo backup policy. + *GeoBackupPolicyProperties `json:"properties,omitempty"` + // Kind - READ-ONLY; Kind of geo backup policy. This is metadata used for the Azure portal experience. 
+ Kind *string `json:"kind,omitempty"` + // Location - READ-ONLY; Backup policy location. + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for GeoBackupPolicy. +func (gbp GeoBackupPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gbp.GeoBackupPolicyProperties != nil { + objectMap["properties"] = gbp.GeoBackupPolicyProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GeoBackupPolicy struct. +func (gbp *GeoBackupPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var geoBackupPolicyProperties GeoBackupPolicyProperties + err = json.Unmarshal(*v, &geoBackupPolicyProperties) + if err != nil { + return err + } + gbp.GeoBackupPolicyProperties = &geoBackupPolicyProperties + } + case "kind": + if v != nil { + var kind string + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + gbp.Kind = &kind + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + gbp.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + gbp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + gbp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + gbp.Type = &typeVar + } + } + } + + return nil +} + +// GeoBackupPolicyProperties the properties of the geo backup policy. +type GeoBackupPolicyProperties struct { + // State - The state of the geo backup policy. Possible values include: 'GeoBackupPolicyStateDisabled', 'GeoBackupPolicyStateEnabled' + State GeoBackupPolicyState `json:"state,omitempty"` + // StorageType - READ-ONLY; The storage type of the geo backup policy. + StorageType *string `json:"storageType,omitempty"` +} + +// IPFirewallRuleInfo IP firewall rule +type IPFirewallRuleInfo struct { + autorest.Response `json:"-"` + // IPFirewallRuleProperties - IP firewall rule properties + *IPFirewallRuleProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for IPFirewallRuleInfo. 
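
// A short sketch of what the custom (un)marshalers above do with the flattened
// "properties" envelope: on the wire GeoBackupPolicyProperties sits under
// "properties", while the Go struct embeds it inline. The JSON literal is illustrative.
func exampleGeoBackupPolicyRoundTrip() (GeoBackupPolicy, error) {
	payload := []byte(`{"kind":"v12.0","properties":{"state":"Enabled","storageType":"Premium"}}`)
	var gbp GeoBackupPolicy
	if err := json.Unmarshal(payload, &gbp); err != nil { // dispatches to the UnmarshalJSON above
		return GeoBackupPolicy{}, err
	}
	// Re-marshalling keeps only the writable "properties" object; READ-ONLY fields
	// such as kind and location are deliberately omitted by the MarshalJSON above.
	_, err := json.Marshal(gbp)
	return gbp, err
}
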
+func (ifri IPFirewallRuleInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ifri.IPFirewallRuleProperties != nil { + objectMap["properties"] = ifri.IPFirewallRuleProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for IPFirewallRuleInfo struct. +func (ifri *IPFirewallRuleInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var IPFirewallRuleProperties IPFirewallRuleProperties + err = json.Unmarshal(*v, &IPFirewallRuleProperties) + if err != nil { + return err + } + ifri.IPFirewallRuleProperties = &IPFirewallRuleProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ifri.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ifri.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ifri.Type = &typeVar + } + } + } + + return nil +} + +// IPFirewallRuleInfoListResult list of IP firewall rules +type IPFirewallRuleInfoListResult struct { + autorest.Response `json:"-"` + // NextLink - Link to next page of results + NextLink *string `json:"nextLink,omitempty"` + // Value - List of IP firewall rules + Value *[]IPFirewallRuleInfo `json:"value,omitempty"` +} + +// IPFirewallRuleInfoListResultIterator provides access to a complete listing of IPFirewallRuleInfo values. +type IPFirewallRuleInfoListResultIterator struct { + i int + page IPFirewallRuleInfoListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *IPFirewallRuleInfoListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRuleInfoListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *IPFirewallRuleInfoListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter IPFirewallRuleInfoListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter IPFirewallRuleInfoListResultIterator) Response() IPFirewallRuleInfoListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter IPFirewallRuleInfoListResultIterator) Value() IPFirewallRuleInfo { + if !iter.page.NotDone() { + return IPFirewallRuleInfo{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the IPFirewallRuleInfoListResultIterator type. +func NewIPFirewallRuleInfoListResultIterator(page IPFirewallRuleInfoListResultPage) IPFirewallRuleInfoListResultIterator { + return IPFirewallRuleInfoListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ifrilr IPFirewallRuleInfoListResult) IsEmpty() bool { + return ifrilr.Value == nil || len(*ifrilr.Value) == 0 +} + +// iPFirewallRuleInfoListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ifrilr IPFirewallRuleInfoListResult) iPFirewallRuleInfoListResultPreparer(ctx context.Context) (*http.Request, error) { + if ifrilr.NextLink == nil || len(to.String(ifrilr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ifrilr.NextLink))) +} + +// IPFirewallRuleInfoListResultPage contains a page of IPFirewallRuleInfo values. +type IPFirewallRuleInfoListResultPage struct { + fn func(context.Context, IPFirewallRuleInfoListResult) (IPFirewallRuleInfoListResult, error) + ifrilr IPFirewallRuleInfoListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *IPFirewallRuleInfoListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/IPFirewallRuleInfoListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ifrilr) + if err != nil { + return err + } + page.ifrilr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *IPFirewallRuleInfoListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page IPFirewallRuleInfoListResultPage) NotDone() bool { + return !page.ifrilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page IPFirewallRuleInfoListResultPage) Response() IPFirewallRuleInfoListResult { + return page.ifrilr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page IPFirewallRuleInfoListResultPage) Values() []IPFirewallRuleInfo { + if page.ifrilr.IsEmpty() { + return nil + } + return *page.ifrilr.Value +} + +// Creates a new instance of the IPFirewallRuleInfoListResultPage type. +func NewIPFirewallRuleInfoListResultPage(getNextPage func(context.Context, IPFirewallRuleInfoListResult) (IPFirewallRuleInfoListResult, error)) IPFirewallRuleInfoListResultPage { + return IPFirewallRuleInfoListResultPage{fn: getNextPage} +} + +// IPFirewallRuleProperties IP firewall rule properties +type IPFirewallRuleProperties struct { + // EndIPAddress - The end IP address of the firewall rule. Must be IPv4 format. 
Must be greater than or equal to startIpAddress + EndIPAddress *string `json:"endIpAddress,omitempty"` + // ProvisioningState - READ-ONLY; Resource provisioning state. Possible values include: 'ProvisioningStateProvisioning', 'ProvisioningStateSucceeded', 'ProvisioningStateDeleting', 'ProvisioningStateFailed', 'ProvisioningStateDeleteError' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // StartIPAddress - The start IP address of the firewall rule. Must be IPv4 format + StartIPAddress *string `json:"startIpAddress,omitempty"` +} + +// IPFirewallRulesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type IPFirewallRulesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *IPFirewallRulesCreateOrUpdateFuture) Result(client IPFirewallRulesClient) (ifri IPFirewallRuleInfo, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.IPFirewallRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ifri.Response.Response, err = future.GetResult(sender); err == nil && ifri.Response.Response.StatusCode != http.StatusNoContent { + ifri, err = client.CreateOrUpdateResponder(ifri.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesCreateOrUpdateFuture", "Result", ifri.Response.Response, "Failure responding to request") + } + } + return +} + +// IPFirewallRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type IPFirewallRulesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *IPFirewallRulesDeleteFuture) Result(client IPFirewallRulesClient) (so SetObject, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.IPFirewallRulesDeleteFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if so.Response.Response, err = future.GetResult(sender); err == nil && so.Response.Response.StatusCode != http.StatusNoContent { + so, err = client.DeleteResponder(so.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesDeleteFuture", "Result", so.Response.Response, "Failure responding to request") + } + } + return +} + +// IPFirewallRulesReplaceAllFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type IPFirewallRulesReplaceAllFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
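
// A minimal sketch of filling in the rule properties defined above: both addresses
// must be IPv4 and endIpAddress may not be lower than startIpAddress. The
// single-address range used here is illustrative only.
func exampleFirewallRule() IPFirewallRuleProperties {
	return IPFirewallRuleProperties{
		StartIPAddress: to.StringPtr("0.0.0.0"),
		EndIPAddress:   to.StringPtr("0.0.0.0"),
		// ProvisioningState is READ-ONLY and is populated by the service.
	}
}
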
+func (future *IPFirewallRulesReplaceAllFuture) Result(client IPFirewallRulesClient) (rafror ReplaceAllFirewallRulesOperationResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesReplaceAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.IPFirewallRulesReplaceAllFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rafror.Response.Response, err = future.GetResult(sender); err == nil && rafror.Response.Response.StatusCode != http.StatusNoContent { + rafror, err = client.ReplaceAllResponder(rafror.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.IPFirewallRulesReplaceAllFuture", "Result", rafror.Response.Response, "Failure responding to request") + } + } + return +} + +// LibraryRequirements library requirements for a Big Data pool powered by Apache Spark +type LibraryRequirements struct { + // Time - READ-ONLY; The last update time of the library requirements file. + Time *date.Time `json:"time,omitempty"` + // Content - The library requirements. + Content *string `json:"content,omitempty"` + // Filename - The filename of the library requirements file. + Filename *string `json:"filename,omitempty"` +} + +// ListAvailableRpOperation ... +type ListAvailableRpOperation struct { + autorest.Response `json:"-"` + Value *[]AvailableRpOperation `json:"value,omitempty"` +} + +// ManagedIdentity the workspace managed identity +type ManagedIdentity struct { + // PrincipalID - READ-ONLY; The principal ID of the workspace managed identity + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant ID of the workspace managed identity + TenantID *string `json:"tenantId,omitempty"` + // Type - The type of managed identity for the workspace. Possible values include: 'ResourceIdentityTypeNone', 'ResourceIdentityTypeSystemAssigned' + Type ResourceIdentityType `json:"type,omitempty"` +} + +// ManagedIdentitySQLControlSettingsModel sql Control Settings for workspace managed identity +type ManagedIdentitySQLControlSettingsModel struct { + autorest.Response `json:"-"` + // ManagedIdentitySQLControlSettingsModelProperties - Sql Control Settings for workspace managed identity + *ManagedIdentitySQLControlSettingsModelProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagedIdentitySQLControlSettingsModel. 
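
// A small sketch of attaching a pip-style requirements file to a Big Data pool via
// the LibraryRequirements type above; the file name and package pins are illustrative.
func exampleLibraryRequirements() LibraryRequirements {
	return LibraryRequirements{
		Filename: to.StringPtr("requirements.txt"),
		Content:  to.StringPtr("numpy==1.18.1\npandas==0.25.3\n"),
		// Time is READ-ONLY and reports the last server-side update of the file.
	}
}
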
+func (miscsm ManagedIdentitySQLControlSettingsModel) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if miscsm.ManagedIdentitySQLControlSettingsModelProperties != nil { + objectMap["properties"] = miscsm.ManagedIdentitySQLControlSettingsModelProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ManagedIdentitySQLControlSettingsModel struct. +func (miscsm *ManagedIdentitySQLControlSettingsModel) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var managedIdentitySQLControlSettingsModelProperties ManagedIdentitySQLControlSettingsModelProperties + err = json.Unmarshal(*v, &managedIdentitySQLControlSettingsModelProperties) + if err != nil { + return err + } + miscsm.ManagedIdentitySQLControlSettingsModelProperties = &managedIdentitySQLControlSettingsModelProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + miscsm.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + miscsm.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + miscsm.Type = &typeVar + } + } + } + + return nil +} + +// ManagedIdentitySQLControlSettingsModelProperties sql Control Settings for workspace managed identity +type ManagedIdentitySQLControlSettingsModelProperties struct { + // GrantSQLControlToManagedIdentity - Grant sql control to managed identity + GrantSQLControlToManagedIdentity *ManagedIdentitySQLControlSettingsModelPropertiesGrantSQLControlToManagedIdentity `json:"grantSqlControlToManagedIdentity,omitempty"` +} + +// ManagedIdentitySQLControlSettingsModelPropertiesGrantSQLControlToManagedIdentity grant sql control to +// managed identity +type ManagedIdentitySQLControlSettingsModelPropertiesGrantSQLControlToManagedIdentity struct { + // DesiredState - Desired state. Possible values include: 'DesiredStateEnabled', 'DesiredStateDisabled' + DesiredState DesiredState `json:"desiredState,omitempty"` + // ActualState - READ-ONLY; Actual state. Possible values include: 'Enabling', 'Enabled', 'Disabling', 'Disabled', 'Unknown' + ActualState ActualState `json:"actualState,omitempty"` +} + +// MetadataSyncConfig configuration for metadata sync +type MetadataSyncConfig struct { + autorest.Response `json:"-"` + // MetadataSyncConfigProperties - Metadata Sync Config properties + *MetadataSyncConfigProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for MetadataSyncConfig. 
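
// A minimal sketch of asking the service to grant SQL control to the workspace
// managed identity using the settings model above; DesiredStateEnabled is one of
// the constants listed in the DesiredState field comment, and ActualState is
// reported back by the service.
func exampleGrantSQLControl() ManagedIdentitySQLControlSettingsModel {
	return ManagedIdentitySQLControlSettingsModel{
		ManagedIdentitySQLControlSettingsModelProperties: &ManagedIdentitySQLControlSettingsModelProperties{
			GrantSQLControlToManagedIdentity: &ManagedIdentitySQLControlSettingsModelPropertiesGrantSQLControlToManagedIdentity{
				DesiredState: DesiredStateEnabled,
			},
		},
	}
}
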
+func (msc MetadataSyncConfig) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if msc.MetadataSyncConfigProperties != nil {
+		objectMap["properties"] = msc.MetadataSyncConfigProperties
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for MetadataSyncConfig struct.
+func (msc *MetadataSyncConfig) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "properties":
+			if v != nil {
+				var metadataSyncConfigProperties MetadataSyncConfigProperties
+				err = json.Unmarshal(*v, &metadataSyncConfigProperties)
+				if err != nil {
+					return err
+				}
+				msc.MetadataSyncConfigProperties = &metadataSyncConfigProperties
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				msc.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				msc.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				msc.Type = &typeVar
+			}
+		}
+	}
+
+	return nil
+}
+
+// MetadataSyncConfigProperties metadata Sync Config properties
+type MetadataSyncConfigProperties struct {
+	// Enabled - Indicates whether the metadata sync is enabled or disabled
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// OperationMetaLogSpecification a log specification exposed in the operation metadata
+type OperationMetaLogSpecification struct {
+	// DisplayName - Log display name
+	DisplayName *string `json:"displayName,omitempty"`
+	// BlobDuration - Time range the log covers
+	BlobDuration *string `json:"blobDuration,omitempty"`
+	// Name - Log unique name
+	Name *string `json:"name,omitempty"`
+}
+
+// OperationMetaMetricDimensionSpecification a metric dimension exposed in the operation metadata
+type OperationMetaMetricDimensionSpecification struct {
+	// DisplayName - Dimension display name
+	DisplayName *string `json:"displayName,omitempty"`
+	// Name - Dimension unique name
+	Name *string `json:"name,omitempty"`
+	// ToBeExportedForShoebox - Whether this metric should be exported for Shoebox
+	ToBeExportedForShoebox *bool `json:"toBeExportedForShoebox,omitempty"`
+}
+
+// OperationMetaMetricSpecification a metric specification exposed in the operation metadata
+type OperationMetaMetricSpecification struct {
+	// SourceMdmNamespace - The source MDM namespace
+	SourceMdmNamespace *string `json:"sourceMdmNamespace,omitempty"`
+	// DisplayName - Metric display name
+	DisplayName *string `json:"displayName,omitempty"`
+	// Name - Metric unique name
+	Name *string `json:"name,omitempty"`
+	// AggregationType - Metric aggregation type
+	AggregationType *string `json:"aggregationType,omitempty"`
+	// DisplayDescription - Metric description
+	DisplayDescription *string `json:"displayDescription,omitempty"`
+	// SourceMdmAccount - The source MDM account
+	SourceMdmAccount *string `json:"sourceMdmAccount,omitempty"`
+	// EnableRegionalMdmAccount - Whether the regional MDM account is enabled
+	EnableRegionalMdmAccount *bool `json:"enableRegionalMdmAccount,omitempty"`
+	// Unit - Metric units
+	Unit *string `json:"unit,omitempty"`
+	// Dimensions - Metric dimensions
+	Dimensions *[]OperationMetaMetricDimensionSpecification `json:"dimensions,omitempty"`
+	// SupportsInstanceLevelAggregation - Whether the metric supports instance-level aggregation
+	SupportsInstanceLevelAggregation *bool `json:"supportsInstanceLevelAggregation,omitempty"`
+	// MetricFilterPattern - Metric filter
+	MetricFilterPattern *string `json:"metricFilterPattern,omitempty"`
+}
+
+// OperationMetaPropertyInfo additional properties exposed in the operation metadata
+type OperationMetaPropertyInfo struct {
+	// ServiceSpecification - Operation service specification
+	ServiceSpecification *OperationMetaServiceSpecification `json:"serviceSpecification,omitempty"`
+}
+
+// OperationMetaServiceSpecification the service specification (metrics and logs) exposed in the operation metadata
+type OperationMetaServiceSpecification struct {
+	// MetricSpecifications - Service metric specifications
+	MetricSpecifications *[]OperationMetaMetricSpecification `json:"metricSpecifications,omitempty"`
+	// LogSpecifications - Service log specifications
+	LogSpecifications *[]OperationMetaLogSpecification `json:"logSpecifications,omitempty"`
+}
+
+// OperationResource an operation
+type OperationResource struct {
+	// ID - Operation ID
+	ID *string `json:"id,omitempty"`
+	// Name - Operation name
+	Name *string `json:"name,omitempty"`
+	// Status - Operation status. Possible values include: 'OperationStatusInProgress', 'OperationStatusSucceeded', 'OperationStatusFailed', 'OperationStatusCanceled'
+	Status OperationStatus `json:"status,omitempty"`
+	// Properties - Operation properties
+	Properties interface{} `json:"properties,omitempty"`
+	// Error - Errors from the operation
+	Error *ErrorDetail `json:"error,omitempty"`
+	// StartTime - Operation start time
+	StartTime *date.Time `json:"startTime,omitempty"`
+	// EndTime - Operation end time
+	EndTime *date.Time `json:"endTime,omitempty"`
+	// PercentComplete - Completion percentage of the operation
+	PercentComplete *float64 `json:"percentComplete,omitempty"`
+}
+
+// ProxyResource the resource model definition for an ARM proxy resource. It will have everything other than
+// required location and tags
+type ProxyResource struct {
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+	Type *string `json:"type,omitempty"`
+}
+
+// QueryInterval a database query.
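
// A small sketch of interpreting the OperationResource defined above while polling;
// the status constants are the ones listed in the Status field comment, and the
// helper name is illustrative.
func exampleOperationOutcome(op OperationResource) (done bool, detail *ErrorDetail) {
	switch op.Status {
	case OperationStatusSucceeded:
		return true, nil
	case OperationStatusFailed, OperationStatusCanceled:
		return true, op.Error // service-reported detail; may still be nil
	default:
		// OperationStatusInProgress (or an unrecognized value): keep polling.
		return false, nil
	}
}
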
+type QueryInterval struct { + // IntervalStartTime - READ-ONLY; The start time of the measurement interval (ISO8601 format). + IntervalStartTime *date.Time `json:"intervalStartTime,omitempty"` + // ExecutionCount - READ-ONLY; The number of times the query was executed during this interval. + ExecutionCount *float64 `json:"executionCount,omitempty"` + // Metrics - READ-ONLY; The list of query metrics during this interval. + Metrics *[]QueryMetric `json:"metrics,omitempty"` +} + +// QueryMetric a database query. +type QueryMetric struct { + // Name - READ-ONLY; The name of the metric + Name *string `json:"name,omitempty"` + // DisplayName - READ-ONLY; The name of the metric for display in user interface + DisplayName *string `json:"displayName,omitempty"` + // Unit - READ-ONLY; The unit of measurement. Possible values include: 'Percentage', 'KB', 'Microseconds' + Unit QueryMetricUnit `json:"unit,omitempty"` + // Value - READ-ONLY; The measured value + Value *float64 `json:"value,omitempty"` +} + +// QueryStatistic a database query. +type QueryStatistic struct { + // QueryID - READ-ONLY; The id of the query + QueryID *string `json:"queryId,omitempty"` + // Intervals - READ-ONLY; The list of query intervals. + Intervals *[]QueryInterval `json:"intervals,omitempty"` +} + +// ReplaceAllFirewallRulesOperationResponse an existing operation for replacing the firewall rules +type ReplaceAllFirewallRulesOperationResponse struct { + autorest.Response `json:"-"` + // OperationID - The operation ID + OperationID *string `json:"operationId,omitempty"` +} + +// ReplaceAllIPFirewallRulesRequest replace all IP firewall rules request +type ReplaceAllIPFirewallRulesRequest struct { + // IPFirewallRules - IP firewall rule properties + IPFirewallRules map[string]*IPFirewallRuleProperties `json:"ipFirewallRules"` +} + +// MarshalJSON is the custom marshaler for ReplaceAllIPFirewallRulesRequest. +func (raifrr ReplaceAllIPFirewallRulesRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if raifrr.IPFirewallRules != nil { + objectMap["ipFirewallRules"] = raifrr.IPFirewallRules + } + return json.Marshal(objectMap) +} + +// ReplicationLink represents a Sql pool replication link. +type ReplicationLink struct { + // Location - READ-ONLY; Location of the workspace that contains this firewall rule. + Location *string `json:"location,omitempty"` + // ReplicationLinkProperties - The properties representing the resource. + *ReplicationLinkProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ReplicationLink. +func (rl ReplicationLink) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rl.ReplicationLinkProperties != nil { + objectMap["properties"] = rl.ReplicationLinkProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ReplicationLink struct. 
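
// A minimal sketch of building the replace-all request above from named rules; rule
// names and address ranges are illustrative. Note that the MarshalJSON above omits
// the map only when it is nil, so an empty non-nil map still serializes as an empty object.
func exampleReplaceAllRules() ReplaceAllIPFirewallRulesRequest {
	return ReplaceAllIPFirewallRulesRequest{
		IPFirewallRules: map[string]*IPFirewallRuleProperties{
			"allowSingleHost": {
				StartIPAddress: to.StringPtr("203.0.113.10"),
				EndIPAddress:   to.StringPtr("203.0.113.10"),
			},
			"officeRange": {
				StartIPAddress: to.StringPtr("203.0.113.0"),
				EndIPAddress:   to.StringPtr("203.0.113.255"),
			},
		},
	}
}
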
+func (rl *ReplicationLink) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + rl.Location = &location + } + case "properties": + if v != nil { + var replicationLinkProperties ReplicationLinkProperties + err = json.Unmarshal(*v, &replicationLinkProperties) + if err != nil { + return err + } + rl.ReplicationLinkProperties = &replicationLinkProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rl.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rl.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rl.Type = &typeVar + } + } + } + + return nil +} + +// ReplicationLinkListResult represents the response to a List Sql pool replication link request. +type ReplicationLinkListResult struct { + autorest.Response `json:"-"` + // Value - The list of Sql pool replication links housed in the Sql pool. + Value *[]ReplicationLink `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ReplicationLinkListResultIterator provides access to a complete listing of ReplicationLink values. +type ReplicationLinkListResultIterator struct { + i int + page ReplicationLinkListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ReplicationLinkListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationLinkListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ReplicationLinkListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ReplicationLinkListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ReplicationLinkListResultIterator) Response() ReplicationLinkListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ReplicationLinkListResultIterator) Value() ReplicationLink { + if !iter.page.NotDone() { + return ReplicationLink{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ReplicationLinkListResultIterator type. 
+func NewReplicationLinkListResultIterator(page ReplicationLinkListResultPage) ReplicationLinkListResultIterator { + return ReplicationLinkListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rllr ReplicationLinkListResult) IsEmpty() bool { + return rllr.Value == nil || len(*rllr.Value) == 0 +} + +// replicationLinkListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rllr ReplicationLinkListResult) replicationLinkListResultPreparer(ctx context.Context) (*http.Request, error) { + if rllr.NextLink == nil || len(to.String(rllr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rllr.NextLink))) +} + +// ReplicationLinkListResultPage contains a page of ReplicationLink values. +type ReplicationLinkListResultPage struct { + fn func(context.Context, ReplicationLinkListResult) (ReplicationLinkListResult, error) + rllr ReplicationLinkListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ReplicationLinkListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ReplicationLinkListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rllr) + if err != nil { + return err + } + page.rllr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ReplicationLinkListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ReplicationLinkListResultPage) NotDone() bool { + return !page.rllr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ReplicationLinkListResultPage) Response() ReplicationLinkListResult { + return page.rllr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ReplicationLinkListResultPage) Values() []ReplicationLink { + if page.rllr.IsEmpty() { + return nil + } + return *page.rllr.Value +} + +// Creates a new instance of the ReplicationLinkListResultPage type. +func NewReplicationLinkListResultPage(getNextPage func(context.Context, ReplicationLinkListResult) (ReplicationLinkListResult, error)) ReplicationLinkListResultPage { + return ReplicationLinkListResultPage{fn: getNextPage} +} + +// ReplicationLinkProperties represents the properties of a Sql pool replication link. +type ReplicationLinkProperties struct { + // IsTerminationAllowed - READ-ONLY; Legacy value indicating whether termination is allowed. Currently always returns true. + IsTerminationAllowed *bool `json:"isTerminationAllowed,omitempty"` + // ReplicationMode - READ-ONLY; Replication mode of this replication link. + ReplicationMode *string `json:"replicationMode,omitempty"` + // PartnerServer - READ-ONLY; The name of the workspace hosting the partner Sql pool. 
+ PartnerServer *string `json:"partnerServer,omitempty"` + // PartnerDatabase - READ-ONLY; The name of the partner Sql pool. + PartnerDatabase *string `json:"partnerDatabase,omitempty"` + // PartnerLocation - READ-ONLY; The Azure Region of the partner Sql pool. + PartnerLocation *string `json:"partnerLocation,omitempty"` + // Role - READ-ONLY; The role of the Sql pool in the replication link. Possible values include: 'Primary', 'Secondary', 'NonReadableSecondary', 'Source', 'Copy' + Role ReplicationRole `json:"role,omitempty"` + // PartnerRole - READ-ONLY; The role of the partner Sql pool in the replication link. Possible values include: 'Primary', 'Secondary', 'NonReadableSecondary', 'Source', 'Copy' + PartnerRole ReplicationRole `json:"partnerRole,omitempty"` + // StartTime - READ-ONLY; The start time for the replication link. + StartTime *date.Time `json:"startTime,omitempty"` + // PercentComplete - READ-ONLY; The percentage of seeding complete for the replication link. + PercentComplete *int32 `json:"percentComplete,omitempty"` + // ReplicationState - READ-ONLY; The replication state for the replication link. Possible values include: 'PENDING', 'SEEDING', 'CATCHUP', 'SUSPENDED' + ReplicationState ReplicationState `json:"replicationState,omitempty"` +} + +// Resource ... +type Resource struct { + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// ResourceMoveDefinition contains the information necessary to perform a resource move (rename). +type ResourceMoveDefinition struct { + // ID - The target ID for the resource + ID *string `json:"id,omitempty"` +} + +// RestorePoint database restore points. +type RestorePoint struct { + autorest.Response `json:"-"` + // Location - READ-ONLY; Resource location. + Location *string `json:"location,omitempty"` + // RestorePointProperties - Resource properties. + *RestorePointProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RestorePoint. +func (rp RestorePoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rp.RestorePointProperties != nil { + objectMap["properties"] = rp.RestorePointProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RestorePoint struct. 
+func (rp *RestorePoint) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + rp.Location = &location + } + case "properties": + if v != nil { + var restorePointProperties RestorePointProperties + err = json.Unmarshal(*v, &restorePointProperties) + if err != nil { + return err + } + rp.RestorePointProperties = &restorePointProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rp.Type = &typeVar + } + } + } + + return nil +} + +// RestorePointListResult a list of long term retention backups. +type RestorePointListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]RestorePoint `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// RestorePointListResultIterator provides access to a complete listing of RestorePoint values. +type RestorePointListResultIterator struct { + i int + page RestorePointListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RestorePointListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RestorePointListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RestorePointListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RestorePointListResultIterator) Response() RestorePointListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RestorePointListResultIterator) Value() RestorePoint { + if !iter.page.NotDone() { + return RestorePoint{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RestorePointListResultIterator type. 
+func NewRestorePointListResultIterator(page RestorePointListResultPage) RestorePointListResultIterator { + return RestorePointListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rplr RestorePointListResult) IsEmpty() bool { + return rplr.Value == nil || len(*rplr.Value) == 0 +} + +// restorePointListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rplr RestorePointListResult) restorePointListResultPreparer(ctx context.Context) (*http.Request, error) { + if rplr.NextLink == nil || len(to.String(rplr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rplr.NextLink))) +} + +// RestorePointListResultPage contains a page of RestorePoint values. +type RestorePointListResultPage struct { + fn func(context.Context, RestorePointListResult) (RestorePointListResult, error) + rplr RestorePointListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RestorePointListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RestorePointListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rplr) + if err != nil { + return err + } + page.rplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RestorePointListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RestorePointListResultPage) NotDone() bool { + return !page.rplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RestorePointListResultPage) Response() RestorePointListResult { + return page.rplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RestorePointListResultPage) Values() []RestorePoint { + if page.rplr.IsEmpty() { + return nil + } + return *page.rplr.Value +} + +// Creates a new instance of the RestorePointListResultPage type. +func NewRestorePointListResultPage(getNextPage func(context.Context, RestorePointListResult) (RestorePointListResult, error)) RestorePointListResultPage { + return RestorePointListResultPage{fn: getNextPage} +} + +// RestorePointProperties properties of a database restore point +type RestorePointProperties struct { + // RestorePointType - READ-ONLY; The type of restore point. 
Possible values include: 'CONTINUOUS', 'DISCRETE' + RestorePointType RestorePointType `json:"restorePointType,omitempty"` + // EarliestRestoreDate - READ-ONLY; The earliest time to which this database can be restored + EarliestRestoreDate *date.Time `json:"earliestRestoreDate,omitempty"` + // RestorePointCreationDate - READ-ONLY; The time the backup was taken + RestorePointCreationDate *date.Time `json:"restorePointCreationDate,omitempty"` + // RestorePointLabel - READ-ONLY; The label of restore point for backup request by user + RestorePointLabel *string `json:"restorePointLabel,omitempty"` +} + +// SecurityAlertPolicyProperties properties of a security alert policy. +type SecurityAlertPolicyProperties struct { + // State - Specifies the state of the policy, whether it is enabled or disabled or a policy has not been applied yet on the specific Sql pool. Possible values include: 'SecurityAlertPolicyStateNew', 'SecurityAlertPolicyStateEnabled', 'SecurityAlertPolicyStateDisabled' + State SecurityAlertPolicyState `json:"state,omitempty"` + // DisabledAlerts - Specifies an array of alerts that are disabled. Allowed values are: Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration, Unsafe_Action + DisabledAlerts *[]string `json:"disabledAlerts,omitempty"` + // EmailAddresses - Specifies an array of e-mail addresses to which the alert is sent. + EmailAddresses *[]string `json:"emailAddresses,omitempty"` + // EmailAccountAdmins - Specifies that the alert is sent to the account administrators. + EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty"` + // StorageEndpoint - Specifies the blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. + StorageEndpoint *string `json:"storageEndpoint,omitempty"` + // StorageAccountAccessKey - Specifies the identifier key of the Threat Detection audit storage account. + StorageAccountAccessKey *string `json:"storageAccountAccessKey,omitempty"` + // RetentionDays - Specifies the number of days to keep in the Threat Detection audit logs. + RetentionDays *int32 `json:"retentionDays,omitempty"` + // CreationTime - READ-ONLY; Specifies the UTC creation time of the policy. + CreationTime *date.Time `json:"creationTime,omitempty"` +} + +// SensitivityLabel a sensitivity label. +type SensitivityLabel struct { + autorest.Response `json:"-"` + // SensitivityLabelProperties - Resource properties. + *SensitivityLabelProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SensitivityLabel. +func (sl SensitivityLabel) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sl.SensitivityLabelProperties != nil { + objectMap["properties"] = sl.SensitivityLabelProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SensitivityLabel struct. 
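
// A sketch of a threat-detection policy built from the properties above; the endpoint,
// key placeholder, addresses and retention are illustrative, and
// SecurityAlertPolicyStateEnabled is one of the constants listed in the State field comment.
func exampleAlertPolicy() SecurityAlertPolicyProperties {
	return SecurityAlertPolicyProperties{
		State:                   SecurityAlertPolicyStateEnabled,
		EmailAccountAdmins:      to.BoolPtr(true),
		EmailAddresses:          &[]string{"secops@contoso.com"},
		DisabledAlerts:          &[]string{"Sql_Injection_Vulnerability"},
		StorageEndpoint:         to.StringPtr("https://contosoaudit.blob.core.windows.net"),
		StorageAccountAccessKey: to.StringPtr("<storage-account-access-key>"),
		RetentionDays:           to.Int32Ptr(30),
	}
}
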
+func (sl *SensitivityLabel) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var sensitivityLabelProperties SensitivityLabelProperties + err = json.Unmarshal(*v, &sensitivityLabelProperties) + if err != nil { + return err + } + sl.SensitivityLabelProperties = &sensitivityLabelProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + sl.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sl.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + sl.Type = &typeVar + } + } + } + + return nil +} + +// SensitivityLabelListResult a list of sensitivity labels. +type SensitivityLabelListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]SensitivityLabel `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SensitivityLabelListResultIterator provides access to a complete listing of SensitivityLabel values. +type SensitivityLabelListResultIterator struct { + i int + page SensitivityLabelListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SensitivityLabelListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SensitivityLabelListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SensitivityLabelListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SensitivityLabelListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SensitivityLabelListResultIterator) Response() SensitivityLabelListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SensitivityLabelListResultIterator) Value() SensitivityLabel { + if !iter.page.NotDone() { + return SensitivityLabel{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SensitivityLabelListResultIterator type. +func NewSensitivityLabelListResultIterator(page SensitivityLabelListResultPage) SensitivityLabelListResultIterator { + return SensitivityLabelListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (sllr SensitivityLabelListResult) IsEmpty() bool { + return sllr.Value == nil || len(*sllr.Value) == 0 +} + +// sensitivityLabelListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sllr SensitivityLabelListResult) sensitivityLabelListResultPreparer(ctx context.Context) (*http.Request, error) { + if sllr.NextLink == nil || len(to.String(sllr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sllr.NextLink))) +} + +// SensitivityLabelListResultPage contains a page of SensitivityLabel values. +type SensitivityLabelListResultPage struct { + fn func(context.Context, SensitivityLabelListResult) (SensitivityLabelListResult, error) + sllr SensitivityLabelListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SensitivityLabelListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SensitivityLabelListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.sllr) + if err != nil { + return err + } + page.sllr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SensitivityLabelListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SensitivityLabelListResultPage) NotDone() bool { + return !page.sllr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SensitivityLabelListResultPage) Response() SensitivityLabelListResult { + return page.sllr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SensitivityLabelListResultPage) Values() []SensitivityLabel { + if page.sllr.IsEmpty() { + return nil + } + return *page.sllr.Value +} + +// Creates a new instance of the SensitivityLabelListResultPage type. +func NewSensitivityLabelListResultPage(getNextPage func(context.Context, SensitivityLabelListResult) (SensitivityLabelListResult, error)) SensitivityLabelListResultPage { + return SensitivityLabelListResultPage{fn: getNextPage} +} + +// SensitivityLabelProperties properties of a sensitivity label. +type SensitivityLabelProperties struct { + // LabelName - The label name. + LabelName *string `json:"labelName,omitempty"` + // LabelID - The label ID. + LabelID *string `json:"labelId,omitempty"` + // InformationType - The information type. + InformationType *string `json:"informationType,omitempty"` + // InformationTypeID - The information type ID. + InformationTypeID *string `json:"informationTypeId,omitempty"` + // IsDisabled - READ-ONLY; Is sensitivity recommendation disabled. Applicable for recommended sensitivity label only. Specifies whether the sensitivity recommendation on this column is disabled (dismissed) or not. + IsDisabled *bool `json:"isDisabled,omitempty"` +} + +// SetObject ... 
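+// Example (sketch; `so` is assumed to be a SetObject returned by a delete,
+// pause or resume call): the payload is untyped, so callers that need it
+// inspect it dynamically:
+//
+//   switch v := so.Value.(type) {
+//   case map[string]interface{}:
+//       fmt.Println("operation payload:", v)
+//   case nil:
+//       fmt.Println("no payload returned")
+//   default:
+//       fmt.Printf("unexpected payload type %T\n", v)
+//   }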
+type SetObject struct { + autorest.Response `json:"-"` + Value interface{} `json:"value,omitempty"` +} + +// Sku SQL pool SKU +type Sku struct { + // Tier - The service tier + Tier *string `json:"tier,omitempty"` + // Name - The SKU name + Name *string `json:"name,omitempty"` +} + +// SQLPool a SQL Analytics pool +type SQLPool struct { + autorest.Response `json:"-"` + // Sku - SQL pool SKU + Sku *Sku `json:"sku,omitempty"` + // SQLPoolResourceProperties - SQL pool properties + *SQLPoolResourceProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPool. +func (sp SQLPool) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sp.Sku != nil { + objectMap["sku"] = sp.Sku + } + if sp.SQLPoolResourceProperties != nil { + objectMap["properties"] = sp.SQLPoolResourceProperties + } + if sp.Tags != nil { + objectMap["tags"] = sp.Tags + } + if sp.Location != nil { + objectMap["location"] = sp.Location + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPool struct. +func (sp *SQLPool) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + sp.Sku = &sku + } + case "properties": + if v != nil { + var SQLPoolResourceProperties SQLPoolResourceProperties + err = json.Unmarshal(*v, &SQLPoolResourceProperties) + if err != nil { + return err + } + sp.SQLPoolResourceProperties = &SQLPoolResourceProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + sp.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + sp.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + sp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + sp.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolBlobAuditingPolicy a Sql pool blob auditing policy. +type SQLPoolBlobAuditingPolicy struct { + autorest.Response `json:"-"` + // Kind - READ-ONLY; Resource kind. + Kind *string `json:"kind,omitempty"` + // SQLPoolBlobAuditingPolicyProperties - Resource properties. + *SQLPoolBlobAuditingPolicyProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolBlobAuditingPolicy. +func (spbap SQLPoolBlobAuditingPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spbap.SQLPoolBlobAuditingPolicyProperties != nil { + objectMap["properties"] = spbap.SQLPoolBlobAuditingPolicyProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolBlobAuditingPolicy struct. +func (spbap *SQLPoolBlobAuditingPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "kind": + if v != nil { + var kind string + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + spbap.Kind = &kind + } + case "properties": + if v != nil { + var SQLPoolBlobAuditingPolicyProperties SQLPoolBlobAuditingPolicyProperties + err = json.Unmarshal(*v, &SQLPoolBlobAuditingPolicyProperties) + if err != nil { + return err + } + spbap.SQLPoolBlobAuditingPolicyProperties = &SQLPoolBlobAuditingPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spbap.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spbap.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spbap.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolBlobAuditingPolicyProperties properties of a Sql pool blob auditing policy. +type SQLPoolBlobAuditingPolicyProperties struct { + // State - Specifies the state of the policy. If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled are required. Possible values include: 'BlobAuditingPolicyStateEnabled', 'BlobAuditingPolicyStateDisabled' + State BlobAuditingPolicyState `json:"state,omitempty"` + // StorageEndpoint - Specifies the blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). If state is Enabled, storageEndpoint is required. + StorageEndpoint *string `json:"storageEndpoint,omitempty"` + // StorageAccountAccessKey - Specifies the identifier key of the auditing storage account. If state is Enabled and storageEndpoint is specified, storageAccountAccessKey is required. + StorageAccountAccessKey *string `json:"storageAccountAccessKey,omitempty"` + // RetentionDays - Specifies the number of days to keep in the audit logs in the storage account. + RetentionDays *int32 `json:"retentionDays,omitempty"` + // AuditActionsAndGroups - Specifies the Actions-Groups and Actions to audit. + // + // The recommended set of action groups to use is the following combination - this will audit all the queries and stored procedures executed against the database, as well as successful and failed logins: + // + // BATCH_COMPLETED_GROUP, + // SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP, + // FAILED_DATABASE_AUTHENTICATION_GROUP. 
+ //
+ // The above combination is also the set that is configured by default when enabling auditing from the Azure portal.
+ //
+ // The supported action groups to audit are (note: choose only specific groups that cover your auditing needs. Using unnecessary groups could lead to very large quantities of audit records):
+ //
+ // APPLICATION_ROLE_CHANGE_PASSWORD_GROUP
+ // BACKUP_RESTORE_GROUP
+ // DATABASE_LOGOUT_GROUP
+ // DATABASE_OBJECT_CHANGE_GROUP
+ // DATABASE_OBJECT_OWNERSHIP_CHANGE_GROUP
+ // DATABASE_OBJECT_PERMISSION_CHANGE_GROUP
+ // DATABASE_OPERATION_GROUP
+ // DATABASE_PERMISSION_CHANGE_GROUP
+ // DATABASE_PRINCIPAL_CHANGE_GROUP
+ // DATABASE_PRINCIPAL_IMPERSONATION_GROUP
+ // DATABASE_ROLE_MEMBER_CHANGE_GROUP
+ // FAILED_DATABASE_AUTHENTICATION_GROUP
+ // SCHEMA_OBJECT_ACCESS_GROUP
+ // SCHEMA_OBJECT_CHANGE_GROUP
+ // SCHEMA_OBJECT_OWNERSHIP_CHANGE_GROUP
+ // SCHEMA_OBJECT_PERMISSION_CHANGE_GROUP
+ // SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP
+ // USER_CHANGE_PASSWORD_GROUP
+ // BATCH_STARTED_GROUP
+ // BATCH_COMPLETED_GROUP
+ //
+ // These are groups that cover all SQL statements and stored procedures executed against the database, and should not be used in combination with other groups as this will result in duplicate audit logs.
+ //
+ // For more information, see [Database-Level Audit Action Groups](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-action-groups).
+ //
+ // For a database auditing policy, specific Actions can also be specified (note that Actions cannot be specified for a server auditing policy). The supported actions to audit are:
+ // SELECT
+ // UPDATE
+ // INSERT
+ // DELETE
+ // EXECUTE
+ // RECEIVE
+ // REFERENCES
+ //
+ // The general form for defining an action to be audited is:
+ // {action} ON {object} BY {principal}
+ //
+ // Note that {object} in the above format can refer to an object like a table, view, or stored procedure, or an entire database or schema. For the latter cases, the forms DATABASE::{db_name} and SCHEMA::{schema_name} are used, respectively.
+ //
+ // For example:
+ // SELECT on dbo.myTable by public
+ // SELECT on DATABASE::myDatabase by public
+ // SELECT on SCHEMA::mySchema by public
+ //
+ // For more information, see [Database-Level Audit Actions](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-actions)
+ AuditActionsAndGroups *[]string `json:"auditActionsAndGroups,omitempty"`
+ // StorageAccountSubscriptionID - Specifies the blob storage subscription Id.
+ StorageAccountSubscriptionID *uuid.UUID `json:"storageAccountSubscriptionId,omitempty"`
+ // IsStorageSecondaryKeyInUse - Specifies whether the storageAccountAccessKey value is the storage's secondary key.
+ IsStorageSecondaryKeyInUse *bool `json:"isStorageSecondaryKeyInUse,omitempty"`
+ // IsAzureMonitorTargetEnabled - Specifies whether audit events are sent to Azure Monitor.
+ // In order to send the events to Azure Monitor, specify 'state' as 'Enabled' and 'isAzureMonitorTargetEnabled' as true.
+ //
+ // When using the REST API to configure auditing, Diagnostic Settings with the 'SQLSecurityAuditEvents' diagnostic logs category on the database should also be created.
+ // Note that for a server-level audit you should use the 'master' database as {databaseName}.
+ // + // Diagnostic Settings URI format: + // PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-version=2017-05-01-preview + // + // For more information, see [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207) + // or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043) + IsAzureMonitorTargetEnabled *bool `json:"isAzureMonitorTargetEnabled,omitempty"` +} + +// SQLPoolBlobAuditingPolicySQLPoolOperationListResult the response to a list Sql pool operations request +type SQLPoolBlobAuditingPolicySQLPoolOperationListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]SQLPoolOperation `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator provides access to a complete listing of +// SQLPoolOperation values. +type SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator struct { + i int + page SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator) Response() SQLPoolBlobAuditingPolicySQLPoolOperationListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator) Value() SQLPoolOperation { + if !iter.page.NotDone() { + return SQLPoolOperation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator type. 
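+// Example (sketch; `ctx` and a populated iterator `iter` obtained from the
+// corresponding client are assumed): values are consumed with NotDone, Value
+// and NextWithContext, exactly as with the other generated iterators in this
+// package.
+//
+//   for iter.NotDone() {
+//       op := iter.Value()
+//       if op.Name != nil {
+//           fmt.Println(*op.Name)
+//       }
+//       if err := iter.NextWithContext(ctx); err != nil {
+//           return err
+//       }
+//   }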
+func NewSQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator(page SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator { + return SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spbapspolr SQLPoolBlobAuditingPolicySQLPoolOperationListResult) IsEmpty() bool { + return spbapspolr.Value == nil || len(*spbapspolr.Value) == 0 +} + +// sQLPoolBlobAuditingPolicySQLPoolOperationListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (spbapspolr SQLPoolBlobAuditingPolicySQLPoolOperationListResult) sQLPoolBlobAuditingPolicySQLPoolOperationListResultPreparer(ctx context.Context) (*http.Request, error) { + if spbapspolr.NextLink == nil || len(to.String(spbapspolr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spbapspolr.NextLink))) +} + +// SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage contains a page of SQLPoolOperation values. +type SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage struct { + fn func(context.Context, SQLPoolBlobAuditingPolicySQLPoolOperationListResult) (SQLPoolBlobAuditingPolicySQLPoolOperationListResult, error) + spbapspolr SQLPoolBlobAuditingPolicySQLPoolOperationListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spbapspolr) + if err != nil { + return err + } + page.spbapspolr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) NotDone() bool { + return !page.spbapspolr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) Response() SQLPoolBlobAuditingPolicySQLPoolOperationListResult { + return page.spbapspolr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage) Values() []SQLPoolOperation { + if page.spbapspolr.IsEmpty() { + return nil + } + return *page.spbapspolr.Value +} + +// Creates a new instance of the SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage type. 
+func NewSQLPoolBlobAuditingPolicySQLPoolOperationListResultPage(getNextPage func(context.Context, SQLPoolBlobAuditingPolicySQLPoolOperationListResult) (SQLPoolBlobAuditingPolicySQLPoolOperationListResult, error)) SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage { + return SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage{fn: getNextPage} +} + +// SQLPoolColumn a Sql pool column resource. +type SQLPoolColumn struct { + // SQLPoolColumnProperties - Resource properties. + *SQLPoolColumnProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolColumn. +func (spc SQLPoolColumn) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spc.SQLPoolColumnProperties != nil { + objectMap["properties"] = spc.SQLPoolColumnProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolColumn struct. +func (spc *SQLPoolColumn) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var SQLPoolColumnProperties SQLPoolColumnProperties + err = json.Unmarshal(*v, &SQLPoolColumnProperties) + if err != nil { + return err + } + spc.SQLPoolColumnProperties = &SQLPoolColumnProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spc.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolColumnListResult a list of Sql pool columns. +type SQLPoolColumnListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]SQLPoolColumn `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SQLPoolColumnListResultIterator provides access to a complete listing of SQLPoolColumn values. +type SQLPoolColumnListResultIterator struct { + i int + page SQLPoolColumnListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *SQLPoolColumnListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolColumnListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolColumnListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolColumnListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolColumnListResultIterator) Response() SQLPoolColumnListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SQLPoolColumnListResultIterator) Value() SQLPoolColumn { + if !iter.page.NotDone() { + return SQLPoolColumn{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolColumnListResultIterator type. +func NewSQLPoolColumnListResultIterator(page SQLPoolColumnListResultPage) SQLPoolColumnListResultIterator { + return SQLPoolColumnListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spclr SQLPoolColumnListResult) IsEmpty() bool { + return spclr.Value == nil || len(*spclr.Value) == 0 +} + +// sQLPoolColumnListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (spclr SQLPoolColumnListResult) sQLPoolColumnListResultPreparer(ctx context.Context) (*http.Request, error) { + if spclr.NextLink == nil || len(to.String(spclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spclr.NextLink))) +} + +// SQLPoolColumnListResultPage contains a page of SQLPoolColumn values. +type SQLPoolColumnListResultPage struct { + fn func(context.Context, SQLPoolColumnListResult) (SQLPoolColumnListResult, error) + spclr SQLPoolColumnListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SQLPoolColumnListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolColumnListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spclr) + if err != nil { + return err + } + page.spclr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolColumnListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolColumnListResultPage) NotDone() bool { + return !page.spclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolColumnListResultPage) Response() SQLPoolColumnListResult { + return page.spclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SQLPoolColumnListResultPage) Values() []SQLPoolColumn { + if page.spclr.IsEmpty() { + return nil + } + return *page.spclr.Value +} + +// Creates a new instance of the SQLPoolColumnListResultPage type. +func NewSQLPoolColumnListResultPage(getNextPage func(context.Context, SQLPoolColumnListResult) (SQLPoolColumnListResult, error)) SQLPoolColumnListResultPage { + return SQLPoolColumnListResultPage{fn: getNextPage} +} + +// SQLPoolColumnProperties sql pool column properties. +type SQLPoolColumnProperties struct { + // ColumnType - The column data type. Possible values include: 'Image', 'Text', 'Uniqueidentifier', 'Date', 'Time', 'Datetime2', 'Datetimeoffset', 'Tinyint', 'Smallint', 'Int', 'Smalldatetime', 'Real', 'Money', 'Datetime', 'Float', 'SQLVariant', 'Ntext', 'Bit', 'Decimal', 'Numeric', 'Smallmoney', 'Bigint', 'Hierarchyid', 'Geometry', 'Geography', 'Varbinary', 'Varchar', 'Binary', 'Char', 'Timestamp', 'Nvarchar', 'Nchar', 'XML', 'Sysname' + ColumnType ColumnDataType `json:"columnType,omitempty"` +} + +// SQLPoolConnectionPolicy a Sql pool connection policy. +type SQLPoolConnectionPolicy struct { + autorest.Response `json:"-"` + // Kind - READ-ONLY; Resource kind. + Kind *string `json:"kind,omitempty"` + // Location - READ-ONLY; Resource location. + Location *string `json:"location,omitempty"` + // SQLPoolConnectionPolicyProperties - Resource properties. + *SQLPoolConnectionPolicyProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolConnectionPolicy. +func (spcp SQLPoolConnectionPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spcp.SQLPoolConnectionPolicyProperties != nil { + objectMap["properties"] = spcp.SQLPoolConnectionPolicyProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolConnectionPolicy struct. 
+func (spcp *SQLPoolConnectionPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "kind": + if v != nil { + var kind string + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + spcp.Kind = &kind + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + spcp.Location = &location + } + case "properties": + if v != nil { + var SQLPoolConnectionPolicyProperties SQLPoolConnectionPolicyProperties + err = json.Unmarshal(*v, &SQLPoolConnectionPolicyProperties) + if err != nil { + return err + } + spcp.SQLPoolConnectionPolicyProperties = &SQLPoolConnectionPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spcp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spcp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spcp.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolConnectionPolicyProperties properties of a Sql pool connection policy. +type SQLPoolConnectionPolicyProperties struct { + // SecurityEnabledAccess - The state of security access. + SecurityEnabledAccess *string `json:"securityEnabledAccess,omitempty"` + // ProxyDNSName - The fully qualified host name of the auditing proxy. + ProxyDNSName *string `json:"proxyDnsName,omitempty"` + // ProxyPort - The port number of the auditing proxy. + ProxyPort *string `json:"proxyPort,omitempty"` + // Visibility - The visibility of the auditing proxy. + Visibility *string `json:"visibility,omitempty"` + // UseServerDefault - Whether server default is enabled or disabled. + UseServerDefault *string `json:"useServerDefault,omitempty"` + // RedirectionState - The state of proxy redirection. + RedirectionState *string `json:"redirectionState,omitempty"` + // State - The connection policy state. + State *string `json:"state,omitempty"` +} + +// SQLPoolInfoListResult list of SQL pools +type SQLPoolInfoListResult struct { + autorest.Response `json:"-"` + // NextLink - Link to the next page of results + NextLink *string `json:"nextLink,omitempty"` + // Value - List of SQL pools + Value *[]SQLPool `json:"value,omitempty"` +} + +// SQLPoolInfoListResultIterator provides access to a complete listing of SQLPool values. +type SQLPoolInfoListResultIterator struct { + i int + page SQLPoolInfoListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SQLPoolInfoListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolInfoListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolInfoListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolInfoListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolInfoListResultIterator) Response() SQLPoolInfoListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SQLPoolInfoListResultIterator) Value() SQLPool { + if !iter.page.NotDone() { + return SQLPool{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolInfoListResultIterator type. +func NewSQLPoolInfoListResultIterator(page SQLPoolInfoListResultPage) SQLPoolInfoListResultIterator { + return SQLPoolInfoListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spilr SQLPoolInfoListResult) IsEmpty() bool { + return spilr.Value == nil || len(*spilr.Value) == 0 +} + +// sQLPoolInfoListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (spilr SQLPoolInfoListResult) sQLPoolInfoListResultPreparer(ctx context.Context) (*http.Request, error) { + if spilr.NextLink == nil || len(to.String(spilr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spilr.NextLink))) +} + +// SQLPoolInfoListResultPage contains a page of SQLPool values. +type SQLPoolInfoListResultPage struct { + fn func(context.Context, SQLPoolInfoListResult) (SQLPoolInfoListResult, error) + spilr SQLPoolInfoListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SQLPoolInfoListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolInfoListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spilr) + if err != nil { + return err + } + page.spilr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolInfoListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolInfoListResultPage) NotDone() bool { + return !page.spilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolInfoListResultPage) Response() SQLPoolInfoListResult { + return page.spilr +} + +// Values returns the slice of values for the current page or nil if there are no values. 
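+// Example (sketch; `ctx` and a SQLPoolInfoListResultPage `page` returned by the
+// SQL pools client are assumed): pages are walked with NotDone, Values and
+// NextWithContext.
+//
+//   for page.NotDone() {
+//       for _, pool := range page.Values() {
+//           if pool.Name != nil {
+//               fmt.Println(*pool.Name)
+//           }
+//       }
+//       if err := page.NextWithContext(ctx); err != nil {
+//           return err
+//       }
+//   }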
+func (page SQLPoolInfoListResultPage) Values() []SQLPool { + if page.spilr.IsEmpty() { + return nil + } + return *page.spilr.Value +} + +// Creates a new instance of the SQLPoolInfoListResultPage type. +func NewSQLPoolInfoListResultPage(getNextPage func(context.Context, SQLPoolInfoListResult) (SQLPoolInfoListResult, error)) SQLPoolInfoListResultPage { + return SQLPoolInfoListResultPage{fn: getNextPage} +} + +// SQLPoolOperation a Sql pool operation. +type SQLPoolOperation struct { + // SQLPoolOperationProperties - Resource properties. + *SQLPoolOperationProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolOperation. +func (spo SQLPoolOperation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spo.SQLPoolOperationProperties != nil { + objectMap["properties"] = spo.SQLPoolOperationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolOperation struct. +func (spo *SQLPoolOperation) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var SQLPoolOperationProperties SQLPoolOperationProperties + err = json.Unmarshal(*v, &SQLPoolOperationProperties) + if err != nil { + return err + } + spo.SQLPoolOperationProperties = &SQLPoolOperationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spo.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spo.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spo.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolOperationProperties the properties of a Sql pool operation. +type SQLPoolOperationProperties struct { + // DatabaseName - READ-ONLY; The name of the Sql pool the operation is being performed on. + DatabaseName *string `json:"databaseName,omitempty"` + // Operation - READ-ONLY; The name of operation. + Operation *string `json:"operation,omitempty"` + // OperationFriendlyName - READ-ONLY; The friendly name of operation. + OperationFriendlyName *string `json:"operationFriendlyName,omitempty"` + // PercentComplete - READ-ONLY; The percentage of the operation completed. + PercentComplete *int32 `json:"percentComplete,omitempty"` + // ServerName - READ-ONLY; The name of the server. + ServerName *string `json:"serverName,omitempty"` + // StartTime - READ-ONLY; The operation start time. + StartTime *date.Time `json:"startTime,omitempty"` + // State - READ-ONLY; The operation state. Possible values include: 'Pending', 'InProgress', 'Succeeded', 'Failed', 'CancelInProgress', 'Cancelled' + State ManagementOperationState `json:"state,omitempty"` + // ErrorCode - READ-ONLY; The operation error code. 
+ ErrorCode *int32 `json:"errorCode,omitempty"` + // ErrorDescription - READ-ONLY; The operation error description. + ErrorDescription *string `json:"errorDescription,omitempty"` + // ErrorSeverity - READ-ONLY; The operation error severity. + ErrorSeverity *int32 `json:"errorSeverity,omitempty"` + // IsUserError - READ-ONLY; Whether or not the error is a user error. + IsUserError *bool `json:"isUserError,omitempty"` + // EstimatedCompletionTime - READ-ONLY; The estimated completion time of the operation. + EstimatedCompletionTime *date.Time `json:"estimatedCompletionTime,omitempty"` + // Description - READ-ONLY; The operation description. + Description *string `json:"description,omitempty"` + // IsCancellable - READ-ONLY; Whether the operation can be cancelled. + IsCancellable *bool `json:"isCancellable,omitempty"` +} + +// SQLPoolPatchInfo a SQL Analytics pool patch info +type SQLPoolPatchInfo struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // Sku - SQL pool SKU + Sku *Sku `json:"sku,omitempty"` + // SQLPoolResourceProperties - SQL pool properties + *SQLPoolResourceProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolPatchInfo. +func (sppi SQLPoolPatchInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sppi.Tags != nil { + objectMap["tags"] = sppi.Tags + } + if sppi.Location != nil { + objectMap["location"] = sppi.Location + } + if sppi.Sku != nil { + objectMap["sku"] = sppi.Sku + } + if sppi.SQLPoolResourceProperties != nil { + objectMap["properties"] = sppi.SQLPoolResourceProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolPatchInfo struct. 
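+// Example (sketch; `to.StringPtr` from github.com/Azure/go-autorest/autorest/to,
+// a `json` import and an illustrative SKU name are assumed): a PATCH payload
+// built from SQLPoolPatchInfo carries only the fields that were set, because
+// the custom marshaler above skips nil members.
+//
+//   skuName := "DW100c"
+//   patch := synapse.SQLPoolPatchInfo{
+//       Tags: map[string]*string{"env": to.StringPtr("dev")},
+//       Sku:  &synapse.Sku{Name: &skuName},
+//   }
+//   body, _ := json.Marshal(patch)   // {"sku":{"name":"DW100c"},"tags":{"env":"dev"}}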
+func (sppi *SQLPoolPatchInfo) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ sppi.Tags = tags
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ sppi.Location = &location
+ }
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ sppi.Sku = &sku
+ }
+ case "properties":
+ if v != nil {
+ var SQLPoolResourceProperties SQLPoolResourceProperties
+ err = json.Unmarshal(*v, &SQLPoolResourceProperties)
+ if err != nil {
+ return err
+ }
+ sppi.SQLPoolResourceProperties = &SQLPoolResourceProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// SQLPoolResourceProperties properties of a SQL Analytics pool
+type SQLPoolResourceProperties struct {
+ // MaxSizeBytes - Maximum size in bytes
+ MaxSizeBytes *int64 `json:"maxSizeBytes,omitempty"`
+ // Collation - Collation mode
+ Collation *string `json:"collation,omitempty"`
+ // SourceDatabaseID - Source database to create from
+ SourceDatabaseID *string `json:"sourceDatabaseId,omitempty"`
+ // RecoverableDatabaseID - Backup database to restore from
+ RecoverableDatabaseID *string `json:"recoverableDatabaseId,omitempty"`
+ // ProvisioningState - Resource state
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // Status - Resource status
+ Status *string `json:"status,omitempty"`
+ // RestorePointInTime - Snapshot time to restore
+ RestorePointInTime *date.Time `json:"restorePointInTime,omitempty"`
+ // CreateMode - Specifies the mode of SQL pool creation.
+ CreateMode *string `json:"createMode,omitempty"`
+ // CreationDate - Date the SQL pool was created
+ CreationDate *date.Time `json:"creationDate,omitempty"`
+}
+
+// SQLPoolRestorePointsCreateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type SQLPoolRestorePointsCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *SQLPoolRestorePointsCreateFuture) Result(client SQLPoolRestorePointsClient) (rp RestorePoint, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("synapse.SQLPoolRestorePointsCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if rp.Response.Response, err = future.GetResult(sender); err == nil && rp.Response.Response.StatusCode != http.StatusNoContent {
+ rp, err = client.CreateResponder(rp.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsCreateFuture", "Result", rp.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// SQLPoolSchema a Sql pool schema resource.
+type SQLPoolSchema struct {
+ // ID - READ-ONLY; Fully qualified resource Id for the resource.
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// SQLPoolSchemaListResult a list of Sql pool schemas. +type SQLPoolSchemaListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]SQLPoolSchema `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SQLPoolSchemaListResultIterator provides access to a complete listing of SQLPoolSchema values. +type SQLPoolSchemaListResultIterator struct { + i int + page SQLPoolSchemaListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SQLPoolSchemaListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSchemaListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolSchemaListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolSchemaListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolSchemaListResultIterator) Response() SQLPoolSchemaListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SQLPoolSchemaListResultIterator) Value() SQLPoolSchema { + if !iter.page.NotDone() { + return SQLPoolSchema{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolSchemaListResultIterator type. +func NewSQLPoolSchemaListResultIterator(page SQLPoolSchemaListResultPage) SQLPoolSchemaListResultIterator { + return SQLPoolSchemaListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spslr SQLPoolSchemaListResult) IsEmpty() bool { + return spslr.Value == nil || len(*spslr.Value) == 0 +} + +// sQLPoolSchemaListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (spslr SQLPoolSchemaListResult) sQLPoolSchemaListResultPreparer(ctx context.Context) (*http.Request, error) { + if spslr.NextLink == nil || len(to.String(spslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spslr.NextLink))) +} + +// SQLPoolSchemaListResultPage contains a page of SQLPoolSchema values. +type SQLPoolSchemaListResultPage struct { + fn func(context.Context, SQLPoolSchemaListResult) (SQLPoolSchemaListResult, error) + spslr SQLPoolSchemaListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SQLPoolSchemaListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSchemaListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spslr) + if err != nil { + return err + } + page.spslr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolSchemaListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolSchemaListResultPage) NotDone() bool { + return !page.spslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolSchemaListResultPage) Response() SQLPoolSchemaListResult { + return page.spslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SQLPoolSchemaListResultPage) Values() []SQLPoolSchema { + if page.spslr.IsEmpty() { + return nil + } + return *page.spslr.Value +} + +// Creates a new instance of the SQLPoolSchemaListResultPage type. +func NewSQLPoolSchemaListResultPage(getNextPage func(context.Context, SQLPoolSchemaListResult) (SQLPoolSchemaListResult, error)) SQLPoolSchemaListResultPage { + return SQLPoolSchemaListResultPage{fn: getNextPage} +} + +// SQLPoolsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SQLPoolsCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
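+// Example (sketch; `ctx`, the SQLPoolsClient `client` and the `future` returned
+// by its Create call are assumed): the caller blocks until provisioning
+// finishes and then reads the created pool.
+//
+//   if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//       return err
+//   }
+//   pool, err := future.Result(client)
+//   if err != nil {
+//       return err
+//   }
+//   if pool.Name != nil {
+//       fmt.Println(*pool.Name)
+//   }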
+func (future *SQLPoolsCreateFuture) Result(client SQLPoolsClient) (sp SQLPool, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.SQLPoolsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sp.Response.Response, err = future.GetResult(sender); err == nil && sp.Response.Response.StatusCode != http.StatusNoContent { + sp, err = client.CreateResponder(sp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsCreateFuture", "Result", sp.Response.Response, "Failure responding to request") + } + } + return +} + +// SQLPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SQLPoolsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SQLPoolsDeleteFuture) Result(client SQLPoolsClient) (so SetObject, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.SQLPoolsDeleteFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if so.Response.Response, err = future.GetResult(sender); err == nil && so.Response.Response.StatusCode != http.StatusNoContent { + so, err = client.DeleteResponder(so.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsDeleteFuture", "Result", so.Response.Response, "Failure responding to request") + } + } + return +} + +// SQLPoolSecurityAlertPolicy a Sql pool security alert policy. +type SQLPoolSecurityAlertPolicy struct { + autorest.Response `json:"-"` + // SecurityAlertPolicyProperties - Resource properties. + *SecurityAlertPolicyProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolSecurityAlertPolicy. +func (spsap SQLPoolSecurityAlertPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spsap.SecurityAlertPolicyProperties != nil { + objectMap["properties"] = spsap.SecurityAlertPolicyProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolSecurityAlertPolicy struct. 
+func (spsap *SQLPoolSecurityAlertPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var securityAlertPolicyProperties SecurityAlertPolicyProperties + err = json.Unmarshal(*v, &securityAlertPolicyProperties) + if err != nil { + return err + } + spsap.SecurityAlertPolicyProperties = &securityAlertPolicyProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spsap.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spsap.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spsap.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolsPauseFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SQLPoolsPauseFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SQLPoolsPauseFuture) Result(client SQLPoolsClient) (so SetObject, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsPauseFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.SQLPoolsPauseFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if so.Response.Response, err = future.GetResult(sender); err == nil && so.Response.Response.StatusCode != http.StatusNoContent { + so, err = client.PauseResponder(so.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsPauseFuture", "Result", so.Response.Response, "Failure responding to request") + } + } + return +} + +// SQLPoolsResumeFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SQLPoolsResumeFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SQLPoolsResumeFuture) Result(client SQLPoolsClient) (so SetObject, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsResumeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.SQLPoolsResumeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if so.Response.Response, err = future.GetResult(sender); err == nil && so.Response.Response.StatusCode != http.StatusNoContent { + so, err = client.ResumeResponder(so.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsResumeFuture", "Result", so.Response.Response, "Failure responding to request") + } + } + return +} + +// SQLPoolTable a Sql pool table resource. 
+type SQLPoolTable struct { + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// SQLPoolTableListResult a list of Sql pool tables. +type SQLPoolTableListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]SQLPoolTable `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SQLPoolTableListResultIterator provides access to a complete listing of SQLPoolTable values. +type SQLPoolTableListResultIterator struct { + i int + page SQLPoolTableListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SQLPoolTableListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTableListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolTableListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolTableListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolTableListResultIterator) Response() SQLPoolTableListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SQLPoolTableListResultIterator) Value() SQLPoolTable { + if !iter.page.NotDone() { + return SQLPoolTable{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolTableListResultIterator type. +func NewSQLPoolTableListResultIterator(page SQLPoolTableListResultPage) SQLPoolTableListResultIterator { + return SQLPoolTableListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (sptlr SQLPoolTableListResult) IsEmpty() bool { + return sptlr.Value == nil || len(*sptlr.Value) == 0 +} + +// sQLPoolTableListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
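The iterator type above hides page boundaries behind `NotDone`, `Value` and `NextWithContext`. A short sketch of draining one, assuming the iterator came from one of the package's generated list methods:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

// printTableNames walks every SQLPoolTable across all pages of the listing.
func printTableNames(ctx context.Context, iter synapse.SQLPoolTableListResultIterator) error {
	for iter.NotDone() {
		table := iter.Value()
		if table.Name != nil {
			fmt.Println(*table.Name)
		}
		// NextWithContext fetches the next page once the current one is
		// exhausted; on error the iterator stays where it was.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```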
+func (sptlr SQLPoolTableListResult) sQLPoolTableListResultPreparer(ctx context.Context) (*http.Request, error) { + if sptlr.NextLink == nil || len(to.String(sptlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sptlr.NextLink))) +} + +// SQLPoolTableListResultPage contains a page of SQLPoolTable values. +type SQLPoolTableListResultPage struct { + fn func(context.Context, SQLPoolTableListResult) (SQLPoolTableListResult, error) + sptlr SQLPoolTableListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SQLPoolTableListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTableListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.sptlr) + if err != nil { + return err + } + page.sptlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolTableListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolTableListResultPage) NotDone() bool { + return !page.sptlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolTableListResultPage) Response() SQLPoolTableListResult { + return page.sptlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SQLPoolTableListResultPage) Values() []SQLPoolTable { + if page.sptlr.IsEmpty() { + return nil + } + return *page.sptlr.Value +} + +// Creates a new instance of the SQLPoolTableListResultPage type. +func NewSQLPoolTableListResultPage(getNextPage func(context.Context, SQLPoolTableListResult) (SQLPoolTableListResult, error)) SQLPoolTableListResultPage { + return SQLPoolTableListResultPage{fn: getNextPage} +} + +// SQLPoolUsage the Sql pool usages. +type SQLPoolUsage struct { + // Name - READ-ONLY; The name of the usage metric. + Name *string `json:"name,omitempty"` + // ResourceName - READ-ONLY; The name of the resource. + ResourceName *string `json:"resourceName,omitempty"` + // DisplayName - READ-ONLY; The usage metric display name. + DisplayName *string `json:"displayName,omitempty"` + // CurrentValue - READ-ONLY; The current value of the usage metric. + CurrentValue *float64 `json:"currentValue,omitempty"` + // Limit - READ-ONLY; The current limit of the usage metric. + Limit *float64 `json:"limit,omitempty"` + // Unit - READ-ONLY; The units of the usage metric. + Unit *string `json:"unit,omitempty"` + // NextResetTime - READ-ONLY; The next reset time for the usage metric (ISO8601 format). + NextResetTime *date.Time `json:"nextResetTime,omitempty"` +} + +// SQLPoolUsageListResult the response to a list Sql pool usages request. +type SQLPoolUsageListResult struct { + autorest.Response `json:"-"` + // Value - The list of usages for the Sql pool. 
+ Value *[]SQLPoolUsage `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SQLPoolUsageListResultIterator provides access to a complete listing of SQLPoolUsage values. +type SQLPoolUsageListResultIterator struct { + i int + page SQLPoolUsageListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SQLPoolUsageListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolUsageListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolUsageListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolUsageListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolUsageListResultIterator) Response() SQLPoolUsageListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SQLPoolUsageListResultIterator) Value() SQLPoolUsage { + if !iter.page.NotDone() { + return SQLPoolUsage{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolUsageListResultIterator type. +func NewSQLPoolUsageListResultIterator(page SQLPoolUsageListResultPage) SQLPoolUsageListResultIterator { + return SQLPoolUsageListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spulr SQLPoolUsageListResult) IsEmpty() bool { + return spulr.Value == nil || len(*spulr.Value) == 0 +} + +// sQLPoolUsageListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (spulr SQLPoolUsageListResult) sQLPoolUsageListResultPreparer(ctx context.Context) (*http.Request, error) { + if spulr.NextLink == nil || len(to.String(spulr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spulr.NextLink))) +} + +// SQLPoolUsageListResultPage contains a page of SQLPoolUsage values. +type SQLPoolUsageListResultPage struct { + fn func(context.Context, SQLPoolUsageListResult) (SQLPoolUsageListResult, error) + spulr SQLPoolUsageListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *SQLPoolUsageListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolUsageListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spulr) + if err != nil { + return err + } + page.spulr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolUsageListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolUsageListResultPage) NotDone() bool { + return !page.spulr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolUsageListResultPage) Response() SQLPoolUsageListResult { + return page.spulr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SQLPoolUsageListResultPage) Values() []SQLPoolUsage { + if page.spulr.IsEmpty() { + return nil + } + return *page.spulr.Value +} + +// Creates a new instance of the SQLPoolUsageListResultPage type. +func NewSQLPoolUsageListResultPage(getNextPage func(context.Context, SQLPoolUsageListResult) (SQLPoolUsageListResult, error)) SQLPoolUsageListResultPage { + return SQLPoolUsageListResultPage{fn: getNextPage} +} + +// SQLPoolVulnerabilityAssessment a Sql pool vulnerability assessment. +type SQLPoolVulnerabilityAssessment struct { + autorest.Response `json:"-"` + // SQLPoolVulnerabilityAssessmentProperties - Resource properties. + *SQLPoolVulnerabilityAssessmentProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolVulnerabilityAssessment. +func (spva SQLPoolVulnerabilityAssessment) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spva.SQLPoolVulnerabilityAssessmentProperties != nil { + objectMap["properties"] = spva.SQLPoolVulnerabilityAssessmentProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolVulnerabilityAssessment struct. 
+func (spva *SQLPoolVulnerabilityAssessment) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var SQLPoolVulnerabilityAssessmentProperties SQLPoolVulnerabilityAssessmentProperties + err = json.Unmarshal(*v, &SQLPoolVulnerabilityAssessmentProperties) + if err != nil { + return err + } + spva.SQLPoolVulnerabilityAssessmentProperties = &SQLPoolVulnerabilityAssessmentProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spva.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spva.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spva.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolVulnerabilityAssessmentListResult a list of the Sql pool's vulnerability assessments. +type SQLPoolVulnerabilityAssessmentListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]SQLPoolVulnerabilityAssessment `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SQLPoolVulnerabilityAssessmentListResultIterator provides access to a complete listing of +// SQLPoolVulnerabilityAssessment values. +type SQLPoolVulnerabilityAssessmentListResultIterator struct { + i int + page SQLPoolVulnerabilityAssessmentListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SQLPoolVulnerabilityAssessmentListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SQLPoolVulnerabilityAssessmentListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SQLPoolVulnerabilityAssessmentListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SQLPoolVulnerabilityAssessmentListResultIterator) Response() SQLPoolVulnerabilityAssessmentListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter SQLPoolVulnerabilityAssessmentListResultIterator) Value() SQLPoolVulnerabilityAssessment { + if !iter.page.NotDone() { + return SQLPoolVulnerabilityAssessment{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SQLPoolVulnerabilityAssessmentListResultIterator type. +func NewSQLPoolVulnerabilityAssessmentListResultIterator(page SQLPoolVulnerabilityAssessmentListResultPage) SQLPoolVulnerabilityAssessmentListResultIterator { + return SQLPoolVulnerabilityAssessmentListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (spvalr SQLPoolVulnerabilityAssessmentListResult) IsEmpty() bool { + return spvalr.Value == nil || len(*spvalr.Value) == 0 +} + +// sQLPoolVulnerabilityAssessmentListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (spvalr SQLPoolVulnerabilityAssessmentListResult) sQLPoolVulnerabilityAssessmentListResultPreparer(ctx context.Context) (*http.Request, error) { + if spvalr.NextLink == nil || len(to.String(spvalr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(spvalr.NextLink))) +} + +// SQLPoolVulnerabilityAssessmentListResultPage contains a page of SQLPoolVulnerabilityAssessment values. +type SQLPoolVulnerabilityAssessmentListResultPage struct { + fn func(context.Context, SQLPoolVulnerabilityAssessmentListResult) (SQLPoolVulnerabilityAssessmentListResult, error) + spvalr SQLPoolVulnerabilityAssessmentListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SQLPoolVulnerabilityAssessmentListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.spvalr) + if err != nil { + return err + } + page.spvalr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SQLPoolVulnerabilityAssessmentListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SQLPoolVulnerabilityAssessmentListResultPage) NotDone() bool { + return !page.spvalr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SQLPoolVulnerabilityAssessmentListResultPage) Response() SQLPoolVulnerabilityAssessmentListResult { + return page.spvalr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SQLPoolVulnerabilityAssessmentListResultPage) Values() []SQLPoolVulnerabilityAssessment { + if page.spvalr.IsEmpty() { + return nil + } + return *page.spvalr.Value +} + +// Creates a new instance of the SQLPoolVulnerabilityAssessmentListResultPage type. 
+func NewSQLPoolVulnerabilityAssessmentListResultPage(getNextPage func(context.Context, SQLPoolVulnerabilityAssessmentListResult) (SQLPoolVulnerabilityAssessmentListResult, error)) SQLPoolVulnerabilityAssessmentListResultPage {
+	return SQLPoolVulnerabilityAssessmentListResultPage{fn: getNextPage}
+}
+
+// SQLPoolVulnerabilityAssessmentProperties properties of a Sql pool Vulnerability Assessment.
+type SQLPoolVulnerabilityAssessmentProperties struct {
+	// StorageContainerPath - A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/). It is required if the server-level vulnerability assessment policy is not set.
+	StorageContainerPath *string `json:"storageContainerPath,omitempty"`
+	// StorageContainerSasKey - A shared access signature (SAS Key) that has write access to the blob container specified in the 'storageContainerPath' parameter. If 'storageAccountAccessKey' isn't specified, StorageContainerSasKey is required.
+	StorageContainerSasKey *string `json:"storageContainerSasKey,omitempty"`
+	// StorageAccountAccessKey - Specifies the identifier key of the storage account for vulnerability assessment scan results. If 'StorageContainerSasKey' isn't specified, storageAccountAccessKey is required.
+	StorageAccountAccessKey *string `json:"storageAccountAccessKey,omitempty"`
+	// RecurringScans - The recurring scans settings
+	RecurringScans *VulnerabilityAssessmentRecurringScansProperties `json:"recurringScans,omitempty"`
+}
+
+// SQLPoolVulnerabilityAssessmentRuleBaseline a Sql pool vulnerability assessment rule baseline.
+type SQLPoolVulnerabilityAssessmentRuleBaseline struct {
+	autorest.Response `json:"-"`
+	// SQLPoolVulnerabilityAssessmentRuleBaselineProperties - Resource properties.
+	*SQLPoolVulnerabilityAssessmentRuleBaselineProperties `json:"properties,omitempty"`
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SQLPoolVulnerabilityAssessmentRuleBaseline.
+func (spvarb SQLPoolVulnerabilityAssessmentRuleBaseline) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if spvarb.SQLPoolVulnerabilityAssessmentRuleBaselineProperties != nil {
+		objectMap["properties"] = spvarb.SQLPoolVulnerabilityAssessmentRuleBaselineProperties
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for SQLPoolVulnerabilityAssessmentRuleBaseline struct.
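These properties form the request body when enabling vulnerability assessment on a SQL pool. A minimal sketch of building it with the `autorest/to` helpers already used elsewhere in this file; the container URL, SAS token and recipient address are placeholders, and the client call that would submit it is not part of this hunk:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/to"
)

// newVulnerabilityAssessment builds the body for a create-or-update call.
// Either the container SAS key or the storage account access key must be
// supplied, per the property documentation above.
func newVulnerabilityAssessment(containerURL, sasKey string) synapse.SQLPoolVulnerabilityAssessment {
	return synapse.SQLPoolVulnerabilityAssessment{
		SQLPoolVulnerabilityAssessmentProperties: &synapse.SQLPoolVulnerabilityAssessmentProperties{
			StorageContainerPath:   to.StringPtr(containerURL),
			StorageContainerSasKey: to.StringPtr(sasKey),
			RecurringScans: &synapse.VulnerabilityAssessmentRecurringScansProperties{
				IsEnabled:               to.BoolPtr(true),
				EmailSubscriptionAdmins: to.BoolPtr(true),
				Emails:                  &[]string{"secops@example.com"},
			},
		},
	}
}
```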
+func (spvarb *SQLPoolVulnerabilityAssessmentRuleBaseline) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var SQLPoolVulnerabilityAssessmentRuleBaselineProperties SQLPoolVulnerabilityAssessmentRuleBaselineProperties + err = json.Unmarshal(*v, &SQLPoolVulnerabilityAssessmentRuleBaselineProperties) + if err != nil { + return err + } + spvarb.SQLPoolVulnerabilityAssessmentRuleBaselineProperties = &SQLPoolVulnerabilityAssessmentRuleBaselineProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spvarb.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spvarb.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spvarb.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolVulnerabilityAssessmentRuleBaselineItem properties for an Sql pool vulnerability assessment rule +// baseline's result. +type SQLPoolVulnerabilityAssessmentRuleBaselineItem struct { + // Result - The rule baseline result + Result *[]string `json:"result,omitempty"` +} + +// SQLPoolVulnerabilityAssessmentRuleBaselineProperties properties of a Sql pool vulnerability assessment +// rule baseline. +type SQLPoolVulnerabilityAssessmentRuleBaselineProperties struct { + // BaselineResults - The rule baseline result + BaselineResults *[]SQLPoolVulnerabilityAssessmentRuleBaselineItem `json:"baselineResults,omitempty"` +} + +// SQLPoolVulnerabilityAssessmentScanExportProperties properties of the export operation's result. +type SQLPoolVulnerabilityAssessmentScanExportProperties struct { + // ExportedReportLocation - READ-ONLY; Location of the exported report (e.g. https://myStorage.blob.core.windows.net/VaScans/scans/serverName/databaseName/scan_scanId.xlsx). + ExportedReportLocation *string `json:"exportedReportLocation,omitempty"` +} + +// SQLPoolVulnerabilityAssessmentScansExport a Sql pool Vulnerability Assessment scan export resource. +type SQLPoolVulnerabilityAssessmentScansExport struct { + autorest.Response `json:"-"` + // SQLPoolVulnerabilityAssessmentScanExportProperties - Resource properties. + *SQLPoolVulnerabilityAssessmentScanExportProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for SQLPoolVulnerabilityAssessmentScansExport. +func (spvase SQLPoolVulnerabilityAssessmentScansExport) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if spvase.SQLPoolVulnerabilityAssessmentScanExportProperties != nil { + objectMap["properties"] = spvase.SQLPoolVulnerabilityAssessmentScanExportProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SQLPoolVulnerabilityAssessmentScansExport struct. 
+func (spvase *SQLPoolVulnerabilityAssessmentScansExport) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var SQLPoolVulnerabilityAssessmentScanExportProperties SQLPoolVulnerabilityAssessmentScanExportProperties + err = json.Unmarshal(*v, &SQLPoolVulnerabilityAssessmentScanExportProperties) + if err != nil { + return err + } + spvase.SQLPoolVulnerabilityAssessmentScanExportProperties = &SQLPoolVulnerabilityAssessmentScanExportProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + spvase.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + spvase.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + spvase.Type = &typeVar + } + } + } + + return nil +} + +// SQLPoolVulnerabilityAssessmentScansInitiateScanFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type SQLPoolVulnerabilityAssessmentScansInitiateScanFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SQLPoolVulnerabilityAssessmentScansInitiateScanFuture) Result(client SQLPoolVulnerabilityAssessmentScansClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansInitiateScanFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.SQLPoolVulnerabilityAssessmentScansInitiateScanFuture") + return + } + ar.Response = future.Response() + return +} + +// TopQueries a database query. +type TopQueries struct { + // AggregationFunction - READ-ONLY; The function that is used to aggregate each query's metrics. Possible values include: 'Min', 'Max', 'Avg', 'Sum' + AggregationFunction QueryAggregationFunction `json:"aggregationFunction,omitempty"` + // ExecutionType - READ-ONLY; The execution type that is used to filter the query instances that are returned. Possible values include: 'Any', 'Regular', 'Irregular', 'Aborted', 'Exception' + ExecutionType QueryExecutionType `json:"executionType,omitempty"` + // IntervalType - READ-ONLY; The duration of the interval (ISO8601 duration format). + IntervalType *string `json:"intervalType,omitempty"` + // NumberOfTopQueries - READ-ONLY; The number of requested queries. + NumberOfTopQueries *float64 `json:"numberOfTopQueries,omitempty"` + // ObservationStartTime - READ-ONLY; The start time for queries that are returned (ISO8601 format) + ObservationStartTime *date.Time `json:"observationStartTime,omitempty"` + // ObservationEndTime - READ-ONLY; The end time for queries that are returned (ISO8601 format) + ObservationEndTime *date.Time `json:"observationEndTime,omitempty"` + // ObservedMetric - READ-ONLY; The type of metric to use for ordering the top metrics. Possible values include: 'CPU', 'Io', 'Logio', 'Duration', 'ExecutionCount' + ObservedMetric QueryObservedMetricType `json:"observedMetric,omitempty"` + // Queries - READ-ONLY; The list of queries. 
+ Queries *[]QueryStatistic `json:"queries,omitempty"` +} + +// TopQueriesListResult represents the response to a get top queries request. +type TopQueriesListResult struct { + // Value - The list of top queries. + Value *[]TopQueries `json:"value,omitempty"` +} + +// TrackedResource the resource model definition for a ARM tracked top level resource +type TrackedResource struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TrackedResource. +func (tr TrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tr.Tags != nil { + objectMap["tags"] = tr.Tags + } + if tr.Location != nil { + objectMap["location"] = tr.Location + } + return json.Marshal(objectMap) +} + +// TransparentDataEncryption represents a Sql pool transparent data encryption configuration. +type TransparentDataEncryption struct { + autorest.Response `json:"-"` + // Location - READ-ONLY; Resource location. + Location *string `json:"location,omitempty"` + // TransparentDataEncryptionProperties - Represents the properties of the resource. + *TransparentDataEncryptionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TransparentDataEncryption. +func (tde TransparentDataEncryption) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tde.TransparentDataEncryptionProperties != nil { + objectMap["properties"] = tde.TransparentDataEncryptionProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for TransparentDataEncryption struct. 
+func (tde *TransparentDataEncryption) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "location":
+			if v != nil {
+				var location string
+				err = json.Unmarshal(*v, &location)
+				if err != nil {
+					return err
+				}
+				tde.Location = &location
+			}
+		case "properties":
+			if v != nil {
+				var transparentDataEncryptionProperties TransparentDataEncryptionProperties
+				err = json.Unmarshal(*v, &transparentDataEncryptionProperties)
+				if err != nil {
+					return err
+				}
+				tde.TransparentDataEncryptionProperties = &transparentDataEncryptionProperties
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				tde.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				tde.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				tde.Type = &typeVar
+			}
+		}
+	}
+
+	return nil
+}
+
+// TransparentDataEncryptionProperties represents the properties of a database transparent data encryption.
+type TransparentDataEncryptionProperties struct {
+	// Status - The status of the database transparent data encryption. Possible values include: 'TransparentDataEncryptionStatusEnabled', 'TransparentDataEncryptionStatusDisabled'
+	Status TransparentDataEncryptionStatus `json:"status,omitempty"`
+}
+
+// VirtualNetworkProfile virtual Network Profile
+type VirtualNetworkProfile struct {
+	// ComputeSubnetID - Subnet ID used for computes in workspace
+	ComputeSubnetID *string `json:"computeSubnetId,omitempty"`
+}
+
+// VulnerabilityAssessmentRecurringScansProperties properties of a Vulnerability Assessment recurring
+// scans.
+type VulnerabilityAssessmentRecurringScansProperties struct {
+	// IsEnabled - Recurring scans state.
+	IsEnabled *bool `json:"isEnabled,omitempty"`
+	// EmailSubscriptionAdmins - Specifies that the scheduled scan notification will be sent to the subscription administrators.
+	EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty"`
+	// Emails - Specifies an array of e-mail addresses to which the scan notification is sent.
+	Emails *[]string `json:"emails,omitempty"`
+}
+
+// VulnerabilityAssessmentScanError properties of a vulnerability assessment scan error.
+type VulnerabilityAssessmentScanError struct {
+	// Code - READ-ONLY; The error code.
+	Code *string `json:"code,omitempty"`
+	// Message - READ-ONLY; The error message.
+	Message *string `json:"message,omitempty"`
+}
+
+// VulnerabilityAssessmentScanRecord a vulnerability assessment scan record.
+type VulnerabilityAssessmentScanRecord struct {
+	// VulnerabilityAssessmentScanRecordProperties - Resource properties.
+	*VulnerabilityAssessmentScanRecordProperties `json:"properties,omitempty"`
+	// ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; The name of the resource
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+	Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VulnerabilityAssessmentScanRecord.
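Note that the generated custom marshalers serialize only writable fields, so READ-ONLY members such as `ID`, `Name` and `Type` never end up in request bodies. A small illustration with the transparent data encryption model defined above (the create-or-update call that would send it is not shown here):

```go
package example

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

func printTDEBody() {
	tde := synapse.TransparentDataEncryption{
		TransparentDataEncryptionProperties: &synapse.TransparentDataEncryptionProperties{
			Status: synapse.TransparentDataEncryptionStatusEnabled,
		},
	}
	// The custom MarshalJSON emits only the writable "properties" object,
	// e.g. {"properties":{"status":"Enabled"}}.
	body, err := json.Marshal(tde)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```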
+func (vasr VulnerabilityAssessmentScanRecord) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vasr.VulnerabilityAssessmentScanRecordProperties != nil { + objectMap["properties"] = vasr.VulnerabilityAssessmentScanRecordProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VulnerabilityAssessmentScanRecord struct. +func (vasr *VulnerabilityAssessmentScanRecord) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var vulnerabilityAssessmentScanRecordProperties VulnerabilityAssessmentScanRecordProperties + err = json.Unmarshal(*v, &vulnerabilityAssessmentScanRecordProperties) + if err != nil { + return err + } + vasr.VulnerabilityAssessmentScanRecordProperties = &vulnerabilityAssessmentScanRecordProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vasr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vasr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vasr.Type = &typeVar + } + } + } + + return nil +} + +// VulnerabilityAssessmentScanRecordListResult a list of vulnerability assessment scan records. +type VulnerabilityAssessmentScanRecordListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Array of results. + Value *[]VulnerabilityAssessmentScanRecord `json:"value,omitempty"` + // NextLink - READ-ONLY; Link to retrieve next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// VulnerabilityAssessmentScanRecordListResultIterator provides access to a complete listing of +// VulnerabilityAssessmentScanRecord values. +type VulnerabilityAssessmentScanRecordListResultIterator struct { + i int + page VulnerabilityAssessmentScanRecordListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VulnerabilityAssessmentScanRecordListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VulnerabilityAssessmentScanRecordListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *VulnerabilityAssessmentScanRecordListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VulnerabilityAssessmentScanRecordListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter VulnerabilityAssessmentScanRecordListResultIterator) Response() VulnerabilityAssessmentScanRecordListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VulnerabilityAssessmentScanRecordListResultIterator) Value() VulnerabilityAssessmentScanRecord { + if !iter.page.NotDone() { + return VulnerabilityAssessmentScanRecord{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the VulnerabilityAssessmentScanRecordListResultIterator type. +func NewVulnerabilityAssessmentScanRecordListResultIterator(page VulnerabilityAssessmentScanRecordListResultPage) VulnerabilityAssessmentScanRecordListResultIterator { + return VulnerabilityAssessmentScanRecordListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (vasrlr VulnerabilityAssessmentScanRecordListResult) IsEmpty() bool { + return vasrlr.Value == nil || len(*vasrlr.Value) == 0 +} + +// vulnerabilityAssessmentScanRecordListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vasrlr VulnerabilityAssessmentScanRecordListResult) vulnerabilityAssessmentScanRecordListResultPreparer(ctx context.Context) (*http.Request, error) { + if vasrlr.NextLink == nil || len(to.String(vasrlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vasrlr.NextLink))) +} + +// VulnerabilityAssessmentScanRecordListResultPage contains a page of VulnerabilityAssessmentScanRecord +// values. +type VulnerabilityAssessmentScanRecordListResultPage struct { + fn func(context.Context, VulnerabilityAssessmentScanRecordListResult) (VulnerabilityAssessmentScanRecordListResult, error) + vasrlr VulnerabilityAssessmentScanRecordListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VulnerabilityAssessmentScanRecordListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VulnerabilityAssessmentScanRecordListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.vasrlr) + if err != nil { + return err + } + page.vasrlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *VulnerabilityAssessmentScanRecordListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VulnerabilityAssessmentScanRecordListResultPage) NotDone() bool { + return !page.vasrlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VulnerabilityAssessmentScanRecordListResultPage) Response() VulnerabilityAssessmentScanRecordListResult { + return page.vasrlr +} + +// Values returns the slice of values for the current page or nil if there are no values. 
+func (page VulnerabilityAssessmentScanRecordListResultPage) Values() []VulnerabilityAssessmentScanRecord { + if page.vasrlr.IsEmpty() { + return nil + } + return *page.vasrlr.Value +} + +// Creates a new instance of the VulnerabilityAssessmentScanRecordListResultPage type. +func NewVulnerabilityAssessmentScanRecordListResultPage(getNextPage func(context.Context, VulnerabilityAssessmentScanRecordListResult) (VulnerabilityAssessmentScanRecordListResult, error)) VulnerabilityAssessmentScanRecordListResultPage { + return VulnerabilityAssessmentScanRecordListResultPage{fn: getNextPage} +} + +// VulnerabilityAssessmentScanRecordProperties properties of a vulnerability assessment scan record. +type VulnerabilityAssessmentScanRecordProperties struct { + // ScanID - READ-ONLY; The scan ID. + ScanID *string `json:"scanId,omitempty"` + // TriggerType - READ-ONLY; The scan trigger type. Possible values include: 'OnDemand', 'Recurring' + TriggerType VulnerabilityAssessmentScanTriggerType `json:"triggerType,omitempty"` + // State - READ-ONLY; The scan status. Possible values include: 'VulnerabilityAssessmentScanStatePassed', 'VulnerabilityAssessmentScanStateFailed', 'VulnerabilityAssessmentScanStateFailedToRun', 'VulnerabilityAssessmentScanStateInProgress' + State VulnerabilityAssessmentScanState `json:"state,omitempty"` + // StartTime - READ-ONLY; The scan start time (UTC). + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - READ-ONLY; The scan end time (UTC). + EndTime *date.Time `json:"endTime,omitempty"` + // Errors - READ-ONLY; The scan errors. + Errors *[]VulnerabilityAssessmentScanError `json:"errors,omitempty"` + // StorageContainerPath - READ-ONLY; The scan results storage container path. + StorageContainerPath *string `json:"storageContainerPath,omitempty"` + // NumberOfFailedSecurityChecks - READ-ONLY; The number of failed security checks. + NumberOfFailedSecurityChecks *int32 `json:"numberOfFailedSecurityChecks,omitempty"` +} + +// Workspace a workspace +type Workspace struct { + autorest.Response `json:"-"` + // WorkspaceProperties - Workspace resource properties + *WorkspaceProperties `json:"properties,omitempty"` + // Identity - Identity of the workspace + Identity *ManagedIdentity `json:"identity,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Workspace. +func (w Workspace) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if w.WorkspaceProperties != nil { + objectMap["properties"] = w.WorkspaceProperties + } + if w.Identity != nil { + objectMap["identity"] = w.Identity + } + if w.Tags != nil { + objectMap["tags"] = w.Tags + } + if w.Location != nil { + objectMap["location"] = w.Location + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Workspace struct. 
+func (w *Workspace) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var workspaceProperties WorkspaceProperties + err = json.Unmarshal(*v, &workspaceProperties) + if err != nil { + return err + } + w.WorkspaceProperties = &workspaceProperties + } + case "identity": + if v != nil { + var identity ManagedIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + w.Identity = &identity + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + w.Tags = tags + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + w.Location = &location + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + w.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + w.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + w.Type = &typeVar + } + } + } + + return nil +} + +// WorkspaceAadAdminInfo workspace active directory administrator +type WorkspaceAadAdminInfo struct { + autorest.Response `json:"-"` + // AadAdminProperties - Workspace active directory administrator properties + *AadAdminProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for WorkspaceAadAdminInfo. +func (waai WorkspaceAadAdminInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if waai.AadAdminProperties != nil { + objectMap["properties"] = waai.AadAdminProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for WorkspaceAadAdminInfo struct. +func (waai *WorkspaceAadAdminInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var aadAdminProperties AadAdminProperties + err = json.Unmarshal(*v, &aadAdminProperties) + if err != nil { + return err + } + waai.AadAdminProperties = &aadAdminProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + waai.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + waai.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + waai.Type = &typeVar + } + } + } + + return nil +} + +// WorkspaceAadAdminsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
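The custom unmarshaler above is what lets callers read nested `properties` values directly off the `Workspace` struct; a small, self-contained illustration with a hand-written response body:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

func decodeWorkspace() error {
	raw := []byte(`{"location":"westus2","properties":{"sqlAdministratorLogin":"sqladminuser"}}`)
	var ws synapse.Workspace
	if err := json.Unmarshal(raw, &ws); err != nil {
		return err
	}
	// The nested properties object is lifted onto the embedded pointer field.
	fmt.Println(*ws.Location, *ws.WorkspaceProperties.SQLAdministratorLogin)
	return nil
}
```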
+type WorkspaceAadAdminsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WorkspaceAadAdminsCreateOrUpdateFuture) Result(client WorkspaceAadAdminsClient) (waai WorkspaceAadAdminInfo, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.WorkspaceAadAdminsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if waai.Response.Response, err = future.GetResult(sender); err == nil && waai.Response.Response.StatusCode != http.StatusNoContent { + waai, err = client.CreateOrUpdateResponder(waai.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsCreateOrUpdateFuture", "Result", waai.Response.Response, "Failure responding to request") + } + } + return +} + +// WorkspaceAadAdminsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type WorkspaceAadAdminsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WorkspaceAadAdminsDeleteFuture) Result(client WorkspaceAadAdminsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.WorkspaceAadAdminsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// WorkspaceInfoListResult list of workspaces +type WorkspaceInfoListResult struct { + autorest.Response `json:"-"` + // NextLink - Link to the next page of results + NextLink *string `json:"nextLink,omitempty"` + // Value - List of workspaces + Value *[]Workspace `json:"value,omitempty"` +} + +// WorkspaceInfoListResultIterator provides access to a complete listing of Workspace values. +type WorkspaceInfoListResultIterator struct { + i int + page WorkspaceInfoListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *WorkspaceInfoListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceInfoListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (iter *WorkspaceInfoListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter WorkspaceInfoListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter WorkspaceInfoListResultIterator) Response() WorkspaceInfoListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter WorkspaceInfoListResultIterator) Value() Workspace { + if !iter.page.NotDone() { + return Workspace{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the WorkspaceInfoListResultIterator type. +func NewWorkspaceInfoListResultIterator(page WorkspaceInfoListResultPage) WorkspaceInfoListResultIterator { + return WorkspaceInfoListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (wilr WorkspaceInfoListResult) IsEmpty() bool { + return wilr.Value == nil || len(*wilr.Value) == 0 +} + +// workspaceInfoListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (wilr WorkspaceInfoListResult) workspaceInfoListResultPreparer(ctx context.Context) (*http.Request, error) { + if wilr.NextLink == nil || len(to.String(wilr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(wilr.NextLink))) +} + +// WorkspaceInfoListResultPage contains a page of Workspace values. +type WorkspaceInfoListResultPage struct { + fn func(context.Context, WorkspaceInfoListResult) (WorkspaceInfoListResult, error) + wilr WorkspaceInfoListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *WorkspaceInfoListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceInfoListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.wilr) + if err != nil { + return err + } + page.wilr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *WorkspaceInfoListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page WorkspaceInfoListResultPage) NotDone() bool { + return !page.wilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page WorkspaceInfoListResultPage) Response() WorkspaceInfoListResult { + return page.wilr +} + +// Values returns the slice of values for the current page or nil if there are no values. 
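Draining the workspace listing page by page follows the same shape as the other paged results in this file; a sketch that assumes the first page came from the workspaces client's list call:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

// workspaceNames collects the names from every page of a workspace listing.
func workspaceNames(ctx context.Context, page synapse.WorkspaceInfoListResultPage) ([]string, error) {
	var names []string
	for page.NotDone() {
		for _, ws := range page.Values() {
			if ws.Name != nil {
				names = append(names, *ws.Name)
			}
		}
		// Advance; once the final page is consumed NotDone reports false.
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}
```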
+func (page WorkspaceInfoListResultPage) Values() []Workspace { + if page.wilr.IsEmpty() { + return nil + } + return *page.wilr.Value +} + +// Creates a new instance of the WorkspaceInfoListResultPage type. +func NewWorkspaceInfoListResultPage(getNextPage func(context.Context, WorkspaceInfoListResult) (WorkspaceInfoListResult, error)) WorkspaceInfoListResultPage { + return WorkspaceInfoListResultPage{fn: getNextPage} +} + +// WorkspacePatchInfo workspace patch details +type WorkspacePatchInfo struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + // Identity - The identity of the workspace + Identity *ManagedIdentity `json:"identity,omitempty"` + // WorkspacePatchProperties - Workspace patch properties + *WorkspacePatchProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for WorkspacePatchInfo. +func (wpi WorkspacePatchInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wpi.Tags != nil { + objectMap["tags"] = wpi.Tags + } + if wpi.Identity != nil { + objectMap["identity"] = wpi.Identity + } + if wpi.WorkspacePatchProperties != nil { + objectMap["properties"] = wpi.WorkspacePatchProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for WorkspacePatchInfo struct. +func (wpi *WorkspacePatchInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + wpi.Tags = tags + } + case "identity": + if v != nil { + var identity ManagedIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + wpi.Identity = &identity + } + case "properties": + if v != nil { + var workspacePatchProperties WorkspacePatchProperties + err = json.Unmarshal(*v, &workspacePatchProperties) + if err != nil { + return err + } + wpi.WorkspacePatchProperties = &workspacePatchProperties + } + } + } + + return nil +} + +// WorkspacePatchProperties workspace patch properties +type WorkspacePatchProperties struct { + // SQLAdministratorLoginPassword - SQL administrator login password + SQLAdministratorLoginPassword *string `json:"sqlAdministratorLoginPassword,omitempty"` + // ProvisioningState - READ-ONLY; Resource provisioning state + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// WorkspaceProperties workspace properties +type WorkspaceProperties struct { + // DefaultDataLakeStorage - Workspace default data lake storage account details + DefaultDataLakeStorage *DataLakeStorageAccountDetails `json:"defaultDataLakeStorage,omitempty"` + // SQLAdministratorLoginPassword - SQL administrator login password + SQLAdministratorLoginPassword *string `json:"sqlAdministratorLoginPassword,omitempty"` + // ManagedResourceGroupName - READ-ONLY; Workspace managed resource group + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty"` + // ProvisioningState - READ-ONLY; Resource provisioning state + ProvisioningState *string `json:"provisioningState,omitempty"` + // SQLAdministratorLogin - Login for workspace SQL active directory administrator + SQLAdministratorLogin *string `json:"sqlAdministratorLogin,omitempty"` + // VirtualNetworkProfile - Virtual Network profile + VirtualNetworkProfile *VirtualNetworkProfile `json:"virtualNetworkProfile,omitempty"` + // ConnectivityEndpoints - Connectivity 
endpoints + ConnectivityEndpoints map[string]*string `json:"connectivityEndpoints"` +} + +// MarshalJSON is the custom marshaler for WorkspaceProperties. +func (wp WorkspaceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wp.DefaultDataLakeStorage != nil { + objectMap["defaultDataLakeStorage"] = wp.DefaultDataLakeStorage + } + if wp.SQLAdministratorLoginPassword != nil { + objectMap["sqlAdministratorLoginPassword"] = wp.SQLAdministratorLoginPassword + } + if wp.SQLAdministratorLogin != nil { + objectMap["sqlAdministratorLogin"] = wp.SQLAdministratorLogin + } + if wp.VirtualNetworkProfile != nil { + objectMap["virtualNetworkProfile"] = wp.VirtualNetworkProfile + } + if wp.ConnectivityEndpoints != nil { + objectMap["connectivityEndpoints"] = wp.ConnectivityEndpoints + } + return json.Marshal(objectMap) +} + +// WorkspacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type WorkspacesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WorkspacesCreateOrUpdateFuture) Result(client WorkspacesClient) (w Workspace, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.WorkspacesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent { + w, err = client.CreateOrUpdateResponder(w.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesCreateOrUpdateFuture", "Result", w.Response.Response, "Failure responding to request") + } + } + return +} + +// WorkspacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type WorkspacesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WorkspacesDeleteFuture) Result(client WorkspacesClient) (so SetObject, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.WorkspacesDeleteFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if so.Response.Response, err = future.GetResult(sender); err == nil && so.Response.Response.StatusCode != http.StatusNoContent { + so, err = client.DeleteResponder(so.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesDeleteFuture", "Result", so.Response.Response, "Failure responding to request") + } + } + return +} + +// WorkspacesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
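// -----------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated diff): the *Future
// types in this file all follow the azure.Future pattern - the client method
// returns immediately, WaitForCompletionRef blocks until the service reaches a
// terminal state, and Result decodes the final payload. The exact parameter
// order of WorkspacesClient.Update is not shown in this diff and is assumed
// below; check workspaces.go before copying. Assumed imports: "context", the
// synapse package, and "github.com/Azure/go-autorest/autorest/to".
func updateWorkspaceTags(ctx context.Context, client synapse.WorkspacesClient, resourceGroup, workspaceName string) (synapse.Workspace, error) {
	patch := synapse.WorkspacePatchInfo{
		Tags: map[string]*string{"env": to.StringPtr("dev")},
	}
	// Assumed signature: Update(ctx, resourceGroupName, workspaceName, patch),
	// returning a WorkspacesUpdateFuture.
	future, err := client.Update(ctx, resourceGroup, workspaceName, patch)
	if err != nil {
		return synapse.Workspace{}, err
	}
	// Block until the long-running operation completes, then fetch the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return synapse.Workspace{}, err
	}
	return future.Result(client)
}
// -----------------------------------------------------------------------------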
+type WorkspacesUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *WorkspacesUpdateFuture) Result(client WorkspacesClient) (w Workspace, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("synapse.WorkspacesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if w.Response.Response, err = future.GetResult(sender); err == nil && w.Response.Response.StatusCode != http.StatusNoContent { + w, err = client.UpdateResponder(w.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesUpdateFuture", "Result", w.Response.Response, "Failure responding to request") + } + } + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/operations.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/operations.go new file mode 100644 index 000000000000..40f7dea1419a --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/operations.go @@ -0,0 +1,360 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// OperationsClient is the azure Synapse Analytics Management Client +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability check whether a workspace name is available +// Parameters: +// request - the check request +func (client OperationsClient) CheckNameAvailability(ctx context.Context, request CheckNameAvailabilityRequest) (result CheckNameAvailabilityResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.CheckNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.OperationsClient", "CheckNameAvailability", err.Error()) + } + + req, err := client.CheckNameAvailabilityPreparer(ctx, request) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "CheckNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "CheckNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client OperationsClient) CheckNameAvailabilityPreparer(ctx context.Context, request CheckNameAvailabilityRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Synapse/checkNameAvailability", pathParameters), + autorest.WithJSON(request), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. 
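// -----------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated diff): checking
// whether a workspace name is free before trying to create it. The field names
// on CheckNameAvailabilityRequest and CheckNameAvailabilityResponse (Name,
// Type, Available) are not shown in this diff and are assumed here from the
// usual ARM check-name contract. Assumed imports: "context", the synapse
// package, and "github.com/Azure/go-autorest/autorest/to".
func workspaceNameIsFree(ctx context.Context, client synapse.OperationsClient, name string) (bool, error) {
	resp, err := client.CheckNameAvailability(ctx, synapse.CheckNameAvailabilityRequest{
		Name: to.StringPtr(name),
		Type: to.StringPtr("Microsoft.Synapse/workspaces"), // assumed resource type literal
	})
	if err != nil {
		return false, err
	}
	return resp.Available != nil && *resp.Available, nil
}
// -----------------------------------------------------------------------------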
+func (client OperationsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAzureAsyncHeaderResult get the status of an operation +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// operationID - operation ID +func (client OperationsClient) GetAzureAsyncHeaderResult(ctx context.Context, resourceGroupName string, workspaceName string, operationID string) (result SetObject, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.GetAzureAsyncHeaderResult") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.OperationsClient", "GetAzureAsyncHeaderResult", err.Error()) + } + + req, err := client.GetAzureAsyncHeaderResultPreparer(ctx, resourceGroupName, workspaceName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "GetAzureAsyncHeaderResult", nil, "Failure preparing request") + return + } + + resp, err := client.GetAzureAsyncHeaderResultSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "GetAzureAsyncHeaderResult", resp, "Failure sending request") + return + } + + result, err = client.GetAzureAsyncHeaderResultResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "GetAzureAsyncHeaderResult", resp, "Failure responding to request") + } + + return +} + +// GetAzureAsyncHeaderResultPreparer prepares the GetAzureAsyncHeaderResult request. 
+func (client OperationsClient) GetAzureAsyncHeaderResultPreparer(ctx context.Context, resourceGroupName string, workspaceName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/operationStatuses/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAzureAsyncHeaderResultSender sends the GetAzureAsyncHeaderResult request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) GetAzureAsyncHeaderResultSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetAzureAsyncHeaderResultResponder handles the response to the GetAzureAsyncHeaderResult request. The method always +// closes the http.Response Body. +func (client OperationsClient) GetAzureAsyncHeaderResultResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound, http.StatusInternalServerError), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLocationHeaderResult get the result of an operation +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// operationID - operation ID +func (client OperationsClient) GetLocationHeaderResult(ctx context.Context, resourceGroupName string, workspaceName string, operationID string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.GetLocationHeaderResult") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.OperationsClient", "GetLocationHeaderResult", err.Error()) + } + + req, err := client.GetLocationHeaderResultPreparer(ctx, resourceGroupName, workspaceName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "GetLocationHeaderResult", nil, "Failure preparing request") + return + } + + resp, err := client.GetLocationHeaderResultSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "GetLocationHeaderResult", resp, "Failure sending request") + return + } + + result, err = client.GetLocationHeaderResultResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "GetLocationHeaderResult", resp, "Failure responding to request") + } + + return +} + +// GetLocationHeaderResultPreparer prepares the GetLocationHeaderResult request. +func (client OperationsClient) GetLocationHeaderResultPreparer(ctx context.Context, resourceGroupName string, workspaceName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/operationResults/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetLocationHeaderResultSender sends the GetLocationHeaderResult request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) GetLocationHeaderResultSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetLocationHeaderResultResponder handles the response to the GetLocationHeaderResult request. The method always +// closes the http.Response Body. 
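// -----------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated diff): checking on a
// workspace operation by ID. GetAzureAsyncHeaderResult returns the decoded
// status document wrapped in a SetObject, while GetLocationHeaderResult in this
// file only surfaces the raw HTTP response, so there you inspect the status
// code. Assumed imports: "context", "fmt", and the synapse package.
func printWorkspaceOperationStatus(ctx context.Context, client synapse.OperationsClient, resourceGroup, workspaceName, operationID string) error {
	status, err := client.GetAzureAsyncHeaderResult(ctx, resourceGroup, workspaceName, operationID)
	if err != nil {
		return err
	}
	// SetObject.Value carries the JSON-decoded operation status payload.
	fmt.Printf("operation %s: HTTP %d, body %v\n", operationID, status.Response.StatusCode, status.Value)
	return nil
}
// -----------------------------------------------------------------------------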
+func (client OperationsClient) GetLocationHeaderResultResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// List get all available operations +func (client OperationsClient) List(ctx context.Context) (result ListAvailableRpOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Synapse/operations")) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result ListAvailableRpOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolblobauditingpolicies.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolblobauditingpolicies.go new file mode 100644 index 000000000000..5b87612685cb --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolblobauditingpolicies.go @@ -0,0 +1,226 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolBlobAuditingPoliciesClient is the azure Synapse Analytics Management Client +type SQLPoolBlobAuditingPoliciesClient struct { + BaseClient +} + +// NewSQLPoolBlobAuditingPoliciesClient creates an instance of the SQLPoolBlobAuditingPoliciesClient client. +func NewSQLPoolBlobAuditingPoliciesClient(subscriptionID string) SQLPoolBlobAuditingPoliciesClient { + return NewSQLPoolBlobAuditingPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolBlobAuditingPoliciesClientWithBaseURI creates an instance of the SQLPoolBlobAuditingPoliciesClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewSQLPoolBlobAuditingPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolBlobAuditingPoliciesClient { + return SQLPoolBlobAuditingPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a SQL pool's blob auditing policy. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// parameters - the database blob auditing policy. +func (client SQLPoolBlobAuditingPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters SQLPoolBlobAuditingPolicy) (result SQLPoolBlobAuditingPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolBlobAuditingPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolBlobAuditingPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolBlobAuditingPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolBlobAuditingPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"synapse.SQLPoolBlobAuditingPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SQLPoolBlobAuditingPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters SQLPoolBlobAuditingPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "blobAuditingPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.Kind = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/auditingSettings/{blobAuditingPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolBlobAuditingPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SQLPoolBlobAuditingPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result SQLPoolBlobAuditingPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a SQL pool's blob auditing policy. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolBlobAuditingPoliciesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolBlobAuditingPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolBlobAuditingPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolBlobAuditingPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolBlobAuditingPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolBlobAuditingPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolBlobAuditingPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SQLPoolBlobAuditingPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "blobAuditingPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/auditingSettings/{blobAuditingPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolBlobAuditingPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
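// -----------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated diff): reading a SQL
// pool's blob auditing policy and writing it back. Both signatures are taken
// from this file; which fields you change depends on
// SQLPoolBlobAuditingPolicyProperties in models.go, which is not shown here.
// Assumed imports: "context" and the synapse package.
func roundTripAuditingPolicy(ctx context.Context, client synapse.SQLPoolBlobAuditingPoliciesClient, resourceGroup, workspaceName, sqlPoolName string) error {
	policy, err := client.Get(ctx, resourceGroup, workspaceName, sqlPoolName)
	if err != nil {
		return err
	}
	// ...adjust the embedded properties here (see models.go for the fields)...
	_, err = client.CreateOrUpdate(ctx, resourceGroup, workspaceName, sqlPoolName, policy)
	return err
}
// -----------------------------------------------------------------------------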
+func (client SQLPoolBlobAuditingPoliciesClient) GetResponder(resp *http.Response) (result SQLPoolBlobAuditingPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolconnectionpolicies.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolconnectionpolicies.go new file mode 100644 index 000000000000..2971c7de37a6 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolconnectionpolicies.go @@ -0,0 +1,133 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolConnectionPoliciesClient is the azure Synapse Analytics Management Client +type SQLPoolConnectionPoliciesClient struct { + BaseClient +} + +// NewSQLPoolConnectionPoliciesClient creates an instance of the SQLPoolConnectionPoliciesClient client. +func NewSQLPoolConnectionPoliciesClient(subscriptionID string) SQLPoolConnectionPoliciesClient { + return NewSQLPoolConnectionPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolConnectionPoliciesClientWithBaseURI creates an instance of the SQLPoolConnectionPoliciesClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewSQLPoolConnectionPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolConnectionPoliciesClient { + return SQLPoolConnectionPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a Sql pool's connection policy, which is used with table auditing. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolConnectionPoliciesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolConnectionPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolConnectionPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolConnectionPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolConnectionPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolConnectionPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolConnectionPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SQLPoolConnectionPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "connectionPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/connectionPolicies/{connectionPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolConnectionPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client SQLPoolConnectionPoliciesClient) GetResponder(resp *http.Response) (result SQLPoolConnectionPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooldatawarehouseuseractivities.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooldatawarehouseuseractivities.go new file mode 100644 index 000000000000..74b4141e95a5 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooldatawarehouseuseractivities.go @@ -0,0 +1,134 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolDataWarehouseUserActivitiesClient is the azure Synapse Analytics Management Client +type SQLPoolDataWarehouseUserActivitiesClient struct { + BaseClient +} + +// NewSQLPoolDataWarehouseUserActivitiesClient creates an instance of the SQLPoolDataWarehouseUserActivitiesClient +// client. +func NewSQLPoolDataWarehouseUserActivitiesClient(subscriptionID string) SQLPoolDataWarehouseUserActivitiesClient { + return NewSQLPoolDataWarehouseUserActivitiesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolDataWarehouseUserActivitiesClientWithBaseURI creates an instance of the +// SQLPoolDataWarehouseUserActivitiesClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolDataWarehouseUserActivitiesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolDataWarehouseUserActivitiesClient { + return SQLPoolDataWarehouseUserActivitiesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the user activities of a SQL pool which includes running and suspended queries +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolDataWarehouseUserActivitiesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result DataWarehouseUserActivities, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolDataWarehouseUserActivitiesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolDataWarehouseUserActivitiesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolDataWarehouseUserActivitiesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolDataWarehouseUserActivitiesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolDataWarehouseUserActivitiesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SQLPoolDataWarehouseUserActivitiesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "dataWarehouseUserActivityName": autorest.Encode("path", "current"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/dataWarehouseUserActivities/{dataWarehouseUserActivityName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolDataWarehouseUserActivitiesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client SQLPoolDataWarehouseUserActivitiesClient) GetResponder(resp *http.Response) (result DataWarehouseUserActivities, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolgeobackuppolicies.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolgeobackuppolicies.go new file mode 100644 index 000000000000..7edf02057d8c --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolgeobackuppolicies.go @@ -0,0 +1,133 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolGeoBackupPoliciesClient is the azure Synapse Analytics Management Client +type SQLPoolGeoBackupPoliciesClient struct { + BaseClient +} + +// NewSQLPoolGeoBackupPoliciesClient creates an instance of the SQLPoolGeoBackupPoliciesClient client. +func NewSQLPoolGeoBackupPoliciesClient(subscriptionID string) SQLPoolGeoBackupPoliciesClient { + return NewSQLPoolGeoBackupPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolGeoBackupPoliciesClientWithBaseURI creates an instance of the SQLPoolGeoBackupPoliciesClient client using +// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewSQLPoolGeoBackupPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolGeoBackupPoliciesClient { + return SQLPoolGeoBackupPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get the specified SQL pool geo backup policy +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolGeoBackupPoliciesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result GeoBackupPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolGeoBackupPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolGeoBackupPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolGeoBackupPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolGeoBackupPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolGeoBackupPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SQLPoolGeoBackupPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "geoBackupPolicyName": autorest.Encode("path", "Default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/geoBackupPolicies/{geoBackupPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolGeoBackupPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
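// -----------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated diff): the read-only
// child-resource clients added in this change share the same call shape, so one
// example stands in for SQLPoolGeoBackupPoliciesClient,
// SQLPoolConnectionPoliciesClient and SQLPoolDataWarehouseUserActivitiesClient:
// Get(ctx, resourceGroupName, workspaceName, sqlPoolName). Assumed imports:
// "context", "fmt", and the synapse package.
func showGeoBackupPolicy(ctx context.Context, client synapse.SQLPoolGeoBackupPoliciesClient, resourceGroup, workspaceName, sqlPoolName string) error {
	policy, err := client.Get(ctx, resourceGroup, workspaceName, sqlPoolName)
	if err != nil {
		return err
	}
	fmt.Printf("geo backup policy for %s: HTTP %d\n", sqlPoolName, policy.Response.StatusCode)
	return nil
}
// -----------------------------------------------------------------------------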
+func (client SQLPoolGeoBackupPoliciesClient) GetResponder(resp *http.Response) (result GeoBackupPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolmetadatasyncconfigs.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolmetadatasyncconfigs.go new file mode 100644 index 000000000000..505ced48fd0a --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolmetadatasyncconfigs.go @@ -0,0 +1,223 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolMetadataSyncConfigsClient is the azure Synapse Analytics Management Client +type SQLPoolMetadataSyncConfigsClient struct { + BaseClient +} + +// NewSQLPoolMetadataSyncConfigsClient creates an instance of the SQLPoolMetadataSyncConfigsClient client. +func NewSQLPoolMetadataSyncConfigsClient(subscriptionID string) SQLPoolMetadataSyncConfigsClient { + return NewSQLPoolMetadataSyncConfigsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolMetadataSyncConfigsClientWithBaseURI creates an instance of the SQLPoolMetadataSyncConfigsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewSQLPoolMetadataSyncConfigsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolMetadataSyncConfigsClient { + return SQLPoolMetadataSyncConfigsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create set the metadata sync configuration for a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// metadataSyncConfiguration - metadata sync configuration +func (client SQLPoolMetadataSyncConfigsClient) Create(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, metadataSyncConfiguration MetadataSyncConfig) (result MetadataSyncConfig, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolMetadataSyncConfigsClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolMetadataSyncConfigsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, metadataSyncConfiguration) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolMetadataSyncConfigsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolMetadataSyncConfigsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolMetadataSyncConfigsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client SQLPoolMetadataSyncConfigsClient) CreatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, metadataSyncConfiguration MetadataSyncConfig) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/metadataSync/config", pathParameters), + autorest.WithJSON(metadataSyncConfiguration), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolMetadataSyncConfigsClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client SQLPoolMetadataSyncConfigsClient) CreateResponder(resp *http.Response) (result MetadataSyncConfig, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get the metadata sync configuration for a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolMetadataSyncConfigsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result MetadataSyncConfig, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolMetadataSyncConfigsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolMetadataSyncConfigsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolMetadataSyncConfigsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolMetadataSyncConfigsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolMetadataSyncConfigsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
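// -----------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated diff): turning
// metadata sync on for a SQL pool. The Create signature is taken from this
// file, but MetadataSyncConfigProperties and its Enabled field are assumed from
// models.go, which is not shown here; verify the names before copying. Assumed
// imports: "context", the synapse package, and
// "github.com/Azure/go-autorest/autorest/to".
func enableMetadataSync(ctx context.Context, client synapse.SQLPoolMetadataSyncConfigsClient, resourceGroup, workspaceName, sqlPoolName string) error {
	cfg := synapse.MetadataSyncConfig{
		MetadataSyncConfigProperties: &synapse.MetadataSyncConfigProperties{
			Enabled: to.BoolPtr(true), // assumed field name
		},
	}
	_, err := client.Create(ctx, resourceGroup, workspaceName, sqlPoolName, cfg)
	return err
}
// -----------------------------------------------------------------------------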
+func (client SQLPoolMetadataSyncConfigsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/metadataSync/config", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolMetadataSyncConfigsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SQLPoolMetadataSyncConfigsClient) GetResponder(resp *http.Response) (result MetadataSyncConfig, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooloperationresults.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooloperationresults.go new file mode 100644 index 000000000000..e476d03e4c04 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooloperationresults.go @@ -0,0 +1,134 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolOperationResultsClient is the azure Synapse Analytics Management Client +type SQLPoolOperationResultsClient struct { + BaseClient +} + +// NewSQLPoolOperationResultsClient creates an instance of the SQLPoolOperationResultsClient client. 
+func NewSQLPoolOperationResultsClient(subscriptionID string) SQLPoolOperationResultsClient { + return NewSQLPoolOperationResultsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolOperationResultsClientWithBaseURI creates an instance of the SQLPoolOperationResultsClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewSQLPoolOperationResultsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolOperationResultsClient { + return SQLPoolOperationResultsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetLocationHeaderResult get the status of a SQL pool operation +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// operationID - operation ID +func (client SQLPoolOperationResultsClient) GetLocationHeaderResult(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, operationID string) (result SetObject, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolOperationResultsClient.GetLocationHeaderResult") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolOperationResultsClient", "GetLocationHeaderResult", err.Error()) + } + + req, err := client.GetLocationHeaderResultPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationResultsClient", "GetLocationHeaderResult", nil, "Failure preparing request") + return + } + + resp, err := client.GetLocationHeaderResultSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationResultsClient", "GetLocationHeaderResult", resp, "Failure sending request") + return + } + + result, err = client.GetLocationHeaderResultResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationResultsClient", "GetLocationHeaderResult", resp, "Failure responding to request") + } + + return +} + +// GetLocationHeaderResultPreparer prepares the GetLocationHeaderResult request. 
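// A short sketch of polling an operation result with the GetLocationHeaderResult operation
// above. The operation ID would normally come from the Location header of a previous SQL
// pool request; here it is a placeholder, and authentication via
// auth.NewAuthorizerFromEnvironment (go-autorest) is assumed.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := synapse.NewSQLPoolOperationResultsClient("<subscription id>")
	client.Authorizer = authorizer

	// SetObject wraps an untyped payload; Value holds whatever JSON the service returned.
	res, err := client.GetLocationHeaderResult(context.Background(),
		"my-rg", "my-workspace", "my-sqlpool", "<operation id>")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("operation result: %+v\n", res.Value)
}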
+func (client SQLPoolOperationResultsClient) GetLocationHeaderResultPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/operationResults/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetLocationHeaderResultSender sends the GetLocationHeaderResult request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolOperationResultsClient) GetLocationHeaderResultSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetLocationHeaderResultResponder handles the response to the GetLocationHeaderResult request. The method always +// closes the http.Response Body. +func (client SQLPoolOperationResultsClient) GetLocationHeaderResultResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooloperations.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooloperations.go new file mode 100644 index 000000000000..8929da54a14e --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooloperations.go @@ -0,0 +1,170 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolOperationsClient is the azure Synapse Analytics Management Client +type SQLPoolOperationsClient struct { + BaseClient +} + +// NewSQLPoolOperationsClient creates an instance of the SQLPoolOperationsClient client. +func NewSQLPoolOperationsClient(subscriptionID string) SQLPoolOperationsClient { + return NewSQLPoolOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolOperationsClientWithBaseURI creates an instance of the SQLPoolOperationsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewSQLPoolOperationsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolOperationsClient { + return SQLPoolOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of operations performed on the SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolOperationsClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolOperationsClient.List") + defer func() { + sc := -1 + if result.spbapspolr.Response.Response != nil { + sc = result.spbapspolr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolOperationsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.spbapspolr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationsClient", "List", resp, "Failure sending request") + return + } + + result.spbapspolr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SQLPoolOperationsClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/operations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SQLPoolOperationsClient) ListResponder(resp *http.Response) (result SQLPoolBlobAuditingPolicySQLPoolOperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolOperationsClient) listNextResults(ctx context.Context, lastResults SQLPoolBlobAuditingPolicySQLPoolOperationListResult) (result SQLPoolBlobAuditingPolicySQLPoolOperationListResult, err error) { + req, err := lastResults.sQLPoolBlobAuditingPolicySQLPoolOperationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolOperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolOperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolOperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
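// A minimal sketch of walking the paged List result above by hand. The page helpers
// (NotDone, Values, NextWithContext) are generated in models.go, which is not part of this
// diff, and authentication via auth.NewAuthorizerFromEnvironment is assumed; ListComplete
// below does the same traversal through an iterator.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := synapse.NewSQLPoolOperationsClient("<subscription id>")
	client.Authorizer = authorizer
	ctx := context.Background()

	page, err := client.List(ctx, "my-rg", "my-workspace", "my-sqlpool")
	if err != nil {
		log.Fatal(err)
	}
	// Drain every page of operations, advancing until the service stops returning nextLink.
	for page.NotDone() {
		for _, op := range page.Values() {
			fmt.Printf("operation: %+v\n", op)
		}
		if err := page.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}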
+func (client SQLPoolOperationsClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolOperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolreplicationlinks.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolreplicationlinks.go new file mode 100644 index 000000000000..5a97c67160f7 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolreplicationlinks.go @@ -0,0 +1,170 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolReplicationLinksClient is the azure Synapse Analytics Management Client +type SQLPoolReplicationLinksClient struct { + BaseClient +} + +// NewSQLPoolReplicationLinksClient creates an instance of the SQLPoolReplicationLinksClient client. +func NewSQLPoolReplicationLinksClient(subscriptionID string) SQLPoolReplicationLinksClient { + return NewSQLPoolReplicationLinksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolReplicationLinksClientWithBaseURI creates an instance of the SQLPoolReplicationLinksClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewSQLPoolReplicationLinksClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolReplicationLinksClient { + return SQLPoolReplicationLinksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists a Sql pool's replication links. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolReplicationLinksClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result ReplicationLinkListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolReplicationLinksClient.List") + defer func() { + sc := -1 + if result.rllr.Response.Response != nil { + sc = result.rllr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolReplicationLinksClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolReplicationLinksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rllr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolReplicationLinksClient", "List", resp, "Failure sending request") + return + } + + result.rllr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolReplicationLinksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SQLPoolReplicationLinksClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/replicationLinks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolReplicationLinksClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client SQLPoolReplicationLinksClient) ListResponder(resp *http.Response) (result ReplicationLinkListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolReplicationLinksClient) listNextResults(ctx context.Context, lastResults ReplicationLinkListResult) (result ReplicationLinkListResult, err error) { + req, err := lastResults.replicationLinkListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolReplicationLinksClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolReplicationLinksClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolReplicationLinksClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SQLPoolReplicationLinksClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result ReplicationLinkListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolReplicationLinksClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolrestorepoints.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolrestorepoints.go new file mode 100644 index 000000000000..e229d1bca231 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolrestorepoints.go @@ -0,0 +1,263 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolRestorePointsClient is the azure Synapse Analytics Management Client +type SQLPoolRestorePointsClient struct { + BaseClient +} + +// NewSQLPoolRestorePointsClient creates an instance of the SQLPoolRestorePointsClient client. +func NewSQLPoolRestorePointsClient(subscriptionID string) SQLPoolRestorePointsClient { + return NewSQLPoolRestorePointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolRestorePointsClientWithBaseURI creates an instance of the SQLPoolRestorePointsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewSQLPoolRestorePointsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolRestorePointsClient { + return SQLPoolRestorePointsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a restore point for a data warehouse. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// parameters - the definition for creating the restore point of this Sql pool. +func (client SQLPoolRestorePointsClient) Create(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters CreateSQLPoolRestorePointDefinition) (result SQLPoolRestorePointsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolRestorePointsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.RestorePointLabel", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolRestorePointsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
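// A sketch of the long-running restore point Create above, driven through the azure.Future
// returned by CreateSender. WaitForCompletionRef comes from go-autorest; the future's
// Result helper and the exact shape of CreateSQLPoolRestorePointDefinition (including
// RestorePointLabel being a *string) live in models.go, which is not part of this diff, so
// treat those names as assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := synapse.NewSQLPoolRestorePointsClient("<subscription id>")
	client.Authorizer = authorizer
	ctx := context.Background()

	// RestorePointLabel is the only field the validation block above requires.
	def := synapse.CreateSQLPoolRestorePointDefinition{
		RestorePointLabel: to.StringPtr("pre-deployment"),
	}

	future, err := client.Create(ctx, "my-rg", "my-workspace", "my-sqlpool", def)
	if err != nil {
		log.Fatal(err)
	}
	// Block until the service reports the operation as finished.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	rp, err := future.Result(client) // Result is generated in models.go (not shown here)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("restore point: %+v\n", rp)
}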
+func (client SQLPoolRestorePointsClient) CreatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters CreateSQLPoolRestorePointDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/restorePoints", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolRestorePointsClient) CreateSender(req *http.Request) (future SQLPoolRestorePointsCreateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client SQLPoolRestorePointsClient) CreateResponder(resp *http.Response) (result RestorePoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get SQL pool backup information +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolRestorePointsClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result RestorePointListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolRestorePointsClient.List") + defer func() { + sc := -1 + if result.rplr.Response.Response != nil { + sc = result.rplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolRestorePointsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "List", resp, "Failure sending request") + return + } + + result.rplr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SQLPoolRestorePointsClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/restorePoints", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolRestorePointsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client SQLPoolRestorePointsClient) ListResponder(resp *http.Response) (result RestorePointListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolRestorePointsClient) listNextResults(ctx context.Context, lastResults RestorePointListResult) (result RestorePointListResult, err error) { + req, err := lastResults.restorePointListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolRestorePointsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SQLPoolRestorePointsClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result RestorePointListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolRestorePointsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpools.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpools.go new file mode 100644 index 000000000000..c17d10e5ef48 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpools.go @@ -0,0 +1,793 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolsClient is the azure Synapse Analytics Management Client +type SQLPoolsClient struct { + BaseClient +} + +// NewSQLPoolsClient creates an instance of the SQLPoolsClient client. +func NewSQLPoolsClient(subscriptionID string) SQLPoolsClient { + return NewSQLPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolsClientWithBaseURI creates an instance of the SQLPoolsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolsClient { + return SQLPoolsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// SQLPoolInfo - the SQL pool to create +func (client SQLPoolsClient) Create(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, SQLPoolInfo SQLPool) (result SQLPoolsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, SQLPoolInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Create", result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. 
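// A sketch of creating a SQL pool with the long-running Create above and waiting for it to
// finish. The SQLPool payload fields (location, SKU, and so on) are defined in models.go,
// which is not part of this diff, so the struct is left empty here; authentication via
// auth.NewAuthorizerFromEnvironment is assumed and resource names are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := synapse.NewSQLPoolsClient("<subscription id>")
	client.Authorizer = authorizer
	ctx := context.Background()

	// Populate the SQLPool fields from models.go (location, SKU, ...) before a real call.
	info := synapse.SQLPool{}

	future, err := client.Create(ctx, "my-rg", "my-workspace", "my-sqlpool", info)
	if err != nil {
		log.Fatal(err)
	}
	// Block until the provisioning operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	fmt.Println("SQL pool create completed")
}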
+func (client SQLPoolsClient) CreatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, SQLPoolInfo SQLPool) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}", pathParameters), + autorest.WithJSON(SQLPoolInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolsClient) CreateSender(req *http.Request) (future SQLPoolsCreateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) CreateResponder(resp *http.Response) (result SQLPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SQLPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolsClient) DeleteSender(req *http.Request) (future SQLPoolsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
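// A sketch of the long-running Delete above. The call returns a future; after waiting for
// completion, Response() on the embedded azure.Future exposes the final HTTP response.
// WaitForCompletionRef and Response come from go-autorest, and authentication via
// auth.NewAuthorizerFromEnvironment is assumed.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := synapse.NewSQLPoolsClient("<subscription id>")
	client.Authorizer = authorizer
	ctx := context.Background()

	future, err := client.Delete(ctx, "my-rg", "my-workspace", "my-sqlpool")
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	fmt.Println("delete finished with status:", future.Response().Status)
}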
+func (client SQLPoolsClient) DeleteResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get SQL pool properties +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPool, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SQLPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) GetResponder(resp *http.Response) (result SQLPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByWorkspace list all SQL pools +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +func (client SQLPoolsClient) ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result SQLPoolInfoListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.ListByWorkspace") + defer func() { + sc := -1 + if result.spilr.Response.Response != nil { + sc = result.spilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "ListByWorkspace", err.Error()) + } + + result.fn = client.listByWorkspaceNextResults + req, err := client.ListByWorkspacePreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "ListByWorkspace", nil, "Failure preparing request") + return + } + + resp, err := client.ListByWorkspaceSender(req) + if err != nil { + result.spilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "ListByWorkspace", resp, "Failure sending request") + return + } + + result.spilr, err = client.ListByWorkspaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "ListByWorkspace", resp, "Failure responding to request") + } + + return +} + +// ListByWorkspacePreparer prepares the ListByWorkspace request. 
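// A sketch of enumerating every SQL pool in a workspace with the ListByWorkspaceComplete
// iterator defined further down in this file. The iterator helpers (NotDone, Value,
// NextWithContext) are generated in models.go, which is not part of this diff;
// authentication via auth.NewAuthorizerFromEnvironment is assumed.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := synapse.NewSQLPoolsClient("<subscription id>")
	client.Authorizer = authorizer
	ctx := context.Background()

	it, err := client.ListByWorkspaceComplete(ctx, "my-rg", "my-workspace")
	if err != nil {
		log.Fatal(err)
	}
	// The iterator crosses page boundaries automatically; NextWithContext fetches more
	// results as needed.
	for it.NotDone() {
		fmt.Printf("sql pool: %+v\n", it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}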
+func (client SQLPoolsClient) ListByWorkspacePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByWorkspaceSender sends the ListByWorkspace request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolsClient) ListByWorkspaceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByWorkspaceResponder handles the response to the ListByWorkspace request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) ListByWorkspaceResponder(resp *http.Response) (result SQLPoolInfoListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByWorkspaceNextResults retrieves the next set of results, if any. +func (client SQLPoolsClient) listByWorkspaceNextResults(ctx context.Context, lastResults SQLPoolInfoListResult) (result SQLPoolInfoListResult, err error) { + req, err := lastResults.sQLPoolInfoListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "listByWorkspaceNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByWorkspaceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "listByWorkspaceNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByWorkspaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "listByWorkspaceNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByWorkspaceComplete enumerates all values, automatically crossing page boundaries as required. +func (client SQLPoolsClient) ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result SQLPoolInfoListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.ListByWorkspace") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByWorkspace(ctx, resourceGroupName, workspaceName) + return +} + +// Pause pause a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolsClient) Pause(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolsPauseFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Pause") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Pause", err.Error()) + } + + req, err := client.PausePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Pause", nil, "Failure preparing request") + return + } + + result, err = client.PauseSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Pause", result.Response(), "Failure sending request") + return + } + + return +} + +// PausePreparer prepares the Pause request. +func (client SQLPoolsClient) PausePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/pause", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PauseSender sends the Pause request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolsClient) PauseSender(req *http.Request) (future SQLPoolsPauseFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// PauseResponder handles the response to the Pause request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) PauseResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Rename rename a SQL pool. 
+// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// parameters - the resource move definition for renaming this Sql pool. +func (client SQLPoolsClient) Rename(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters ResourceMoveDefinition) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Rename") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Rename", err.Error()) + } + + req, err := client.RenamePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Rename", nil, "Failure preparing request") + return + } + + resp, err := client.RenameSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Rename", resp, "Failure sending request") + return + } + + result, err = client.RenameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Rename", resp, "Failure responding to request") + } + + return +} + +// RenamePreparer prepares the Rename request. +func (client SQLPoolsClient) RenamePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters ResourceMoveDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/move", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RenameSender sends the Rename request. The method will close the +// http.Response Body if it receives an error. 
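+
+// NOTE (editorial usage sketch, not generated code): Pause is a long-running
+// operation that returns a SQLPoolsPauseFuture. A caller would typically block
+// on the future before reading the outcome, roughly as below. The constructor
+// and authorizer wiring are assumptions based on this package's other generated
+// clients; all resource names are placeholders.
+//
+//	client := NewSQLPoolsClient("<subscription-id>") // constructor assumed from this package's generated client file
+//	client.Authorizer = authorizer                   // e.g. from autorest/azure/auth
+//	future, err := client.Pause(ctx, "my-rg", "my-workspace", "my-pool")
+//	if err != nil {
+//		return err
+//	}
+//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//		return err
+//	}
+//	setObj, err := client.PauseResponder(future.Response()) // typed result once the operation finishes
+//	_ = setObj
+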
+func (client SQLPoolsClient) RenameSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// RenameResponder handles the response to the Rename request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) RenameResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Resume resume a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolsClient) Resume(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolsResumeFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Resume") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Resume", err.Error()) + } + + req, err := client.ResumePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Resume", nil, "Failure preparing request") + return + } + + result, err = client.ResumeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Resume", result.Response(), "Failure sending request") + return + } + + return +} + +// ResumePreparer prepares the Resume request. +func (client SQLPoolsClient) ResumePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/resume", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResumeSender sends the Resume request. The method will close the +// http.Response Body if it receives an error. 
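+
+// NOTE (editorial usage sketch, not generated code): Rename issues a POST to the
+// pool's /move endpoint. The validation above requires parameters.ID to be set;
+// it is assumed here to be a *string carrying the target resource ID of the
+// renamed pool. Names and IDs below are placeholders.
+//
+//	newID := "/subscriptions/<sub>/resourceGroups/my-rg/providers/Microsoft.Synapse/workspaces/my-workspace/sqlPools/my-pool-renamed"
+//	_, err := client.Rename(ctx, "my-rg", "my-workspace", "my-pool", ResourceMoveDefinition{ID: &newID})
+//	if err != nil {
+//		return err
+//	}
+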
+func (client SQLPoolsClient) ResumeSender(req *http.Request) (future SQLPoolsResumeFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResumeResponder handles the response to the Resume request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) ResumeResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update apply a partial update to a SQL pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// SQLPoolInfo - the updated SQL pool properties +func (client SQLPoolsClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, SQLPoolInfo SQLPoolPatchInfo) (result SQLPool, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, SQLPoolInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
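+
+// NOTE (editorial usage sketch, not generated code): Update sends a PATCH, so only
+// the fields set on SQLPoolPatchInfo are changed. The Tags field used below is an
+// assumption about the model in models.go (other patch models in this SDK expose
+// map[string]*string tags); consult the generated model for the real shape.
+//
+//	team := "analytics"
+//	patch := SQLPoolPatchInfo{Tags: map[string]*string{"team": &team}} // Tags field assumed
+//	pool, err := client.Update(ctx, "my-rg", "my-workspace", "my-pool", patch)
+//	if err != nil {
+//		return err
+//	}
+//	_ = pool
+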
+func (client SQLPoolsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, SQLPoolInfo SQLPoolPatchInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}", pathParameters), + autorest.WithJSON(SQLPoolInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client SQLPoolsClient) UpdateResponder(resp *http.Response) (result SQLPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolschemas.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolschemas.go new file mode 100644 index 000000000000..3d66a13b5de6 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolschemas.go @@ -0,0 +1,173 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolSchemasClient is the azure Synapse Analytics Management Client +type SQLPoolSchemasClient struct { + BaseClient +} + +// NewSQLPoolSchemasClient creates an instance of the SQLPoolSchemasClient client. 
+func NewSQLPoolSchemasClient(subscriptionID string) SQLPoolSchemasClient { + return NewSQLPoolSchemasClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolSchemasClientWithBaseURI creates an instance of the SQLPoolSchemasClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolSchemasClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolSchemasClient { + return SQLPoolSchemasClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets schemas of a given SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// filter - an OData filter expression that filters elements in the collection. +func (client SQLPoolSchemasClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result SQLPoolSchemaListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSchemasClient.List") + defer func() { + sc := -1 + if result.spslr.Response.Response != nil { + sc = result.spslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSchemasClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSchemasClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.spslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSchemasClient", "List", resp, "Failure sending request") + return + } + + result.spslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSchemasClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
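+
+// NOTE (editorial usage sketch, not generated code): a schemas client is built the
+// same way as the other clients in this package; pass a sovereign-cloud ARM endpoint
+// to the WithBaseURI constructor when needed. The endpoint, authorizer and names
+// below are placeholders.
+//
+//	schemasClient := NewSQLPoolSchemasClient("<subscription-id>")
+//	// or, for Azure US Government:
+//	// schemasClient := NewSQLPoolSchemasClientWithBaseURI("https://management.usgovcloudapi.net", "<subscription-id>")
+//	schemasClient.Authorizer = authorizer
+//	firstPage, err := schemasClient.List(ctx, "my-rg", "my-workspace", "my-pool", "") // empty $filter returns all schemas
+//	_ = firstPage
+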
+func (client SQLPoolSchemasClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolSchemasClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SQLPoolSchemasClient) ListResponder(resp *http.Response) (result SQLPoolSchemaListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolSchemasClient) listNextResults(ctx context.Context, lastResults SQLPoolSchemaListResult) (result SQLPoolSchemaListResult, err error) { + req, err := lastResults.sQLPoolSchemaListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolSchemasClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolSchemasClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSchemasClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
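+
+// NOTE (editorial usage sketch, not generated code): List returns a
+// SQLPoolSchemaListResultPage; the NotDone/Values/NextWithContext methods used
+// below are the paging helpers generated in models.go (assumed), and the Name
+// field on each schema is likewise assumed from the model.
+//
+//	page, err := schemasClient.List(ctx, "my-rg", "my-workspace", "my-pool", "")
+//	if err != nil {
+//		return err
+//	}
+//	for page.NotDone() {
+//		for _, schema := range page.Values() {
+//			fmt.Println(*schema.Name)
+//		}
+//		if err = page.NextWithContext(ctx); err != nil {
+//			return err
+//		}
+//	}
+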
+func (client SQLPoolSchemasClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result SQLPoolSchemaListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSchemasClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName, filter) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolsecurityalertpolicies.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolsecurityalertpolicies.go new file mode 100644 index 000000000000..ea1e33860e02 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolsecurityalertpolicies.go @@ -0,0 +1,225 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolSecurityAlertPoliciesClient is the azure Synapse Analytics Management Client +type SQLPoolSecurityAlertPoliciesClient struct { + BaseClient +} + +// NewSQLPoolSecurityAlertPoliciesClient creates an instance of the SQLPoolSecurityAlertPoliciesClient client. +func NewSQLPoolSecurityAlertPoliciesClient(subscriptionID string) SQLPoolSecurityAlertPoliciesClient { + return NewSQLPoolSecurityAlertPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolSecurityAlertPoliciesClientWithBaseURI creates an instance of the SQLPoolSecurityAlertPoliciesClient +// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI +// (sovereign clouds, Azure stack). +func NewSQLPoolSecurityAlertPoliciesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolSecurityAlertPoliciesClient { + return SQLPoolSecurityAlertPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a Sql pool's security alert policy. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// parameters - the Sql pool security alert policy. 
+func (client SQLPoolSecurityAlertPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters SQLPoolSecurityAlertPolicy) (result SQLPoolSecurityAlertPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSecurityAlertPoliciesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSecurityAlertPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSecurityAlertPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSecurityAlertPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSecurityAlertPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SQLPoolSecurityAlertPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters SQLPoolSecurityAlertPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityAlertPolicyName": autorest.Encode("path", "default"), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/securityAlertPolicies/{securityAlertPolicyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
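+
+// NOTE (editorial usage sketch, not generated code): the CreateOrUpdate preparer
+// (and the Get preparer later in this file) pins securityAlertPolicyName to
+// "default", so a pool has a single addressable policy. The policy body is elided
+// below because its fields live in models.go and are not shown in this file; the
+// client variable is assumed to be built like the other clients here.
+//
+//	policy := SQLPoolSecurityAlertPolicy{ /* populate the alert-policy properties per models.go */ }
+//	created, err := policiesClient.CreateOrUpdate(ctx, "my-rg", "my-workspace", "my-pool", policy)
+//	if err != nil {
+//		return err
+//	}
+//	_ = created
+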
+func (client SQLPoolSecurityAlertPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SQLPoolSecurityAlertPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result SQLPoolSecurityAlertPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a Sql pool's security alert policy. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolSecurityAlertPoliciesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolSecurityAlertPolicy, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSecurityAlertPoliciesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSecurityAlertPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSecurityAlertPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSecurityAlertPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSecurityAlertPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
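+
+// NOTE (editorial usage sketch, not generated code): reading the policy back goes
+// through the same fixed "default" name, so Get takes only the pool coordinates.
+//
+//	current, err := policiesClient.Get(ctx, "my-rg", "my-workspace", "my-pool")
+//	if err != nil {
+//		return err
+//	}
+//	_ = current
+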
+func (client SQLPoolSecurityAlertPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityAlertPolicyName": autorest.Encode("path", "default"), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/securityAlertPolicies/{securityAlertPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolSecurityAlertPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SQLPoolSecurityAlertPoliciesClient) GetResponder(resp *http.Response) (result SQLPoolSecurityAlertPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolsensitivitylabels.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolsensitivitylabels.go new file mode 100644 index 000000000000..5ec03938e422 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolsensitivitylabels.go @@ -0,0 +1,693 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolSensitivityLabelsClient is the azure Synapse Analytics Management Client +type SQLPoolSensitivityLabelsClient struct { + BaseClient +} + +// NewSQLPoolSensitivityLabelsClient creates an instance of the SQLPoolSensitivityLabelsClient client. +func NewSQLPoolSensitivityLabelsClient(subscriptionID string) SQLPoolSensitivityLabelsClient { + return NewSQLPoolSensitivityLabelsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolSensitivityLabelsClientWithBaseURI creates an instance of the SQLPoolSensitivityLabelsClient client using +// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewSQLPoolSensitivityLabelsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolSensitivityLabelsClient { + return SQLPoolSensitivityLabelsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates the sensitivity label of a given column in a Sql pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// schemaName - the name of the schema. +// tableName - the name of the table. +// columnName - the name of the column. +// parameters - the column sensitivity label resource. +func (client SQLPoolSensitivityLabelsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string, parameters SensitivityLabel) (result SensitivityLabel, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSensitivityLabelsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, tableName, columnName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"synapse.SQLPoolSensitivityLabelsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SQLPoolSensitivityLabelsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string, parameters SensitivityLabel) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "columnName": autorest.Encode("path", columnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "sensitivityLabelSource": autorest.Encode("path", "current"), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolSensitivityLabelsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SQLPoolSensitivityLabelsClient) CreateOrUpdateResponder(resp *http.Response) (result SensitivityLabel, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the sensitivity label of a given column in a Sql pool +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// schemaName - the name of the schema. +// tableName - the name of the table. +// columnName - the name of the column. 
+func (client SQLPoolSensitivityLabelsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSensitivityLabelsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, tableName, columnName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SQLPoolSensitivityLabelsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "columnName": autorest.Encode("path", columnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "sensitivityLabelSource": autorest.Encode("path", "current"), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolSensitivityLabelsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SQLPoolSensitivityLabelsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DisableRecommendation disables sensitivity recommendations on a given column +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// schemaName - the name of the schema. +// tableName - the name of the table. +// columnName - the name of the column. +func (client SQLPoolSensitivityLabelsClient) DisableRecommendation(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.DisableRecommendation") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSensitivityLabelsClient", "DisableRecommendation", err.Error()) + } + + req, err := client.DisableRecommendationPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, tableName, columnName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "DisableRecommendation", nil, "Failure preparing request") + return + } + + resp, err := client.DisableRecommendationSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "DisableRecommendation", resp, "Failure sending request") + return + } + + result, err = client.DisableRecommendationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "DisableRecommendation", resp, "Failure responding to request") + } + + return +} + +// DisableRecommendationPreparer prepares the DisableRecommendation request. 
+func (client SQLPoolSensitivityLabelsClient) DisableRecommendationPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "columnName": autorest.Encode("path", columnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "sensitivityLabelSource": autorest.Encode("path", "recommended"), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}/disable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DisableRecommendationSender sends the DisableRecommendation request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolSensitivityLabelsClient) DisableRecommendationSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DisableRecommendationResponder handles the response to the DisableRecommendation request. The method always +// closes the http.Response Body. +func (client SQLPoolSensitivityLabelsClient) DisableRecommendationResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// EnableRecommendation enables sensitivity recommendations on a given column (recommendations are enabled by default +// on all columns) +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// schemaName - the name of the schema. +// tableName - the name of the table. +// columnName - the name of the column. 
+func (client SQLPoolSensitivityLabelsClient) EnableRecommendation(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.EnableRecommendation") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSensitivityLabelsClient", "EnableRecommendation", err.Error()) + } + + req, err := client.EnableRecommendationPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, tableName, columnName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "EnableRecommendation", nil, "Failure preparing request") + return + } + + resp, err := client.EnableRecommendationSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "EnableRecommendation", resp, "Failure sending request") + return + } + + result, err = client.EnableRecommendationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "EnableRecommendation", resp, "Failure responding to request") + } + + return +} + +// EnableRecommendationPreparer prepares the EnableRecommendation request. 
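+
+// NOTE (editorial usage sketch, not generated code): DisableRecommendation and
+// EnableRecommendation both POST against the "recommended" label source for a
+// single column; recommendations are on by default, so Disable is the opt-out and
+// Enable undoes it. Names below are placeholders.
+//
+//	if _, err := labelsClient.DisableRecommendation(ctx, "my-rg", "my-workspace", "my-pool", "dbo", "Customers", "Email"); err != nil {
+//		return err
+//	}
+//	// ...later, opt the column back in:
+//	if _, err := labelsClient.EnableRecommendation(ctx, "my-rg", "my-workspace", "my-pool", "dbo", "Customers", "Email"); err != nil {
+//		return err
+//	}
+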
+func (client SQLPoolSensitivityLabelsClient) EnableRecommendationPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "columnName": autorest.Encode("path", columnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "sensitivityLabelSource": autorest.Encode("path", "recommended"), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}/enable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EnableRecommendationSender sends the EnableRecommendation request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolSensitivityLabelsClient) EnableRecommendationSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// EnableRecommendationResponder handles the response to the EnableRecommendation request. The method always +// closes the http.Response Body. +func (client SQLPoolSensitivityLabelsClient) EnableRecommendationResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListCurrent gets SQL pool sensitivity labels. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// filter - an OData filter expression that filters elements in the collection. 
+func (client SQLPoolSensitivityLabelsClient) ListCurrent(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result SensitivityLabelListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.ListCurrent") + defer func() { + sc := -1 + if result.sllr.Response.Response != nil { + sc = result.sllr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSensitivityLabelsClient", "ListCurrent", err.Error()) + } + + result.fn = client.listCurrentNextResults + req, err := client.ListCurrentPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "ListCurrent", nil, "Failure preparing request") + return + } + + resp, err := client.ListCurrentSender(req) + if err != nil { + result.sllr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "ListCurrent", resp, "Failure sending request") + return + } + + result.sllr, err = client.ListCurrentResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "ListCurrent", resp, "Failure responding to request") + } + + return +} + +// ListCurrentPreparer prepares the ListCurrent request. +func (client SQLPoolSensitivityLabelsClient) ListCurrentPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/currentSensitivityLabels", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListCurrentSender sends the ListCurrent request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolSensitivityLabelsClient) ListCurrentSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListCurrentResponder handles the response to the ListCurrent request. The method always +// closes the http.Response Body. +func (client SQLPoolSensitivityLabelsClient) ListCurrentResponder(resp *http.Response) (result SensitivityLabelListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listCurrentNextResults retrieves the next set of results, if any. +func (client SQLPoolSensitivityLabelsClient) listCurrentNextResults(ctx context.Context, lastResults SensitivityLabelListResult) (result SensitivityLabelListResult, err error) { + req, err := lastResults.sensitivityLabelListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "listCurrentNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListCurrentSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "listCurrentNextResults", resp, "Failure sending next results request") + } + result, err = client.ListCurrentResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "listCurrentNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListCurrentComplete enumerates all values, automatically crossing page boundaries as required. +func (client SQLPoolSensitivityLabelsClient) ListCurrentComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result SensitivityLabelListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.ListCurrent") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListCurrent(ctx, resourceGroupName, workspaceName, SQLPoolName, filter) + return +} + +// ListRecommended gets sensitivity labels of a given SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// includeDisabledRecommendations - specifies whether to include disabled recommendations or not. +// skipToken - an OData query option to indicate how many elements to skip in the collection. +// filter - an OData filter expression that filters elements in the collection. 
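+
+// NOTE (editorial usage sketch, not generated code): ListCurrentComplete drives the
+// pager for you and hands back an iterator; NotDone/Value/NextWithContext are the
+// iterator helpers generated in models.go (assumed).
+//
+//	it, err := labelsClient.ListCurrentComplete(ctx, "my-rg", "my-workspace", "my-pool", "")
+//	if err != nil {
+//		return err
+//	}
+//	for it.NotDone() {
+//		lbl := it.Value() // one SensitivityLabel per iteration
+//		_ = lbl
+//		if err = it.NextWithContext(ctx); err != nil {
+//			return err
+//		}
+//	}
+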
+func (client SQLPoolSensitivityLabelsClient) ListRecommended(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, includeDisabledRecommendations *bool, skipToken string, filter string) (result SensitivityLabelListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.ListRecommended") + defer func() { + sc := -1 + if result.sllr.Response.Response != nil { + sc = result.sllr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolSensitivityLabelsClient", "ListRecommended", err.Error()) + } + + result.fn = client.listRecommendedNextResults + req, err := client.ListRecommendedPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, includeDisabledRecommendations, skipToken, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "ListRecommended", nil, "Failure preparing request") + return + } + + resp, err := client.ListRecommendedSender(req) + if err != nil { + result.sllr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "ListRecommended", resp, "Failure sending request") + return + } + + result.sllr, err = client.ListRecommendedResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "ListRecommended", resp, "Failure responding to request") + } + + return +} + +// ListRecommendedPreparer prepares the ListRecommended request. 
+func (client SQLPoolSensitivityLabelsClient) ListRecommendedPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, includeDisabledRecommendations *bool, skipToken string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if includeDisabledRecommendations != nil { + queryParameters["includeDisabledRecommendations"] = autorest.Encode("query", *includeDisabledRecommendations) + } + if len(skipToken) > 0 { + queryParameters["$skipToken"] = autorest.Encode("query", skipToken) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/recommendedSensitivityLabels", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListRecommendedSender sends the ListRecommended request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolSensitivityLabelsClient) ListRecommendedSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListRecommendedResponder handles the response to the ListRecommended request. The method always +// closes the http.Response Body. +func (client SQLPoolSensitivityLabelsClient) ListRecommendedResponder(resp *http.Response) (result SensitivityLabelListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listRecommendedNextResults retrieves the next set of results, if any. +func (client SQLPoolSensitivityLabelsClient) listRecommendedNextResults(ctx context.Context, lastResults SensitivityLabelListResult) (result SensitivityLabelListResult, err error) { + req, err := lastResults.sensitivityLabelListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "listRecommendedNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListRecommendedSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "listRecommendedNextResults", resp, "Failure sending next results request") + } + result, err = client.ListRecommendedResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolSensitivityLabelsClient", "listRecommendedNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListRecommendedComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client SQLPoolSensitivityLabelsClient) ListRecommendedComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, includeDisabledRecommendations *bool, skipToken string, filter string) (result SensitivityLabelListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolSensitivityLabelsClient.ListRecommended") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListRecommended(ctx, resourceGroupName, workspaceName, SQLPoolName, includeDisabledRecommendations, skipToken, filter) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltablecolumns.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltablecolumns.go new file mode 100644 index 000000000000..2766e012785f --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltablecolumns.go @@ -0,0 +1,178 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolTableColumnsClient is the azure Synapse Analytics Management Client +type SQLPoolTableColumnsClient struct { + BaseClient +} + +// NewSQLPoolTableColumnsClient creates an instance of the SQLPoolTableColumnsClient client. +func NewSQLPoolTableColumnsClient(subscriptionID string) SQLPoolTableColumnsClient { + return NewSQLPoolTableColumnsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolTableColumnsClientWithBaseURI creates an instance of the SQLPoolTableColumnsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewSQLPoolTableColumnsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolTableColumnsClient { + return SQLPoolTableColumnsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ListByTableName gets columns in a given table in a SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// schemaName - the name of the schema. +// tableName - the name of the table. +// filter - an OData filter expression that filters elements in the collection. 
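The ListByTableName operation documented above also gets a ListByTableNameComplete variant later in this file that hides paging behind an iterator. A short sketch of driving that iterator (illustrative only; the NotDone/Value/NextWithContext iterator methods and the Authorizer field come from this package's models and go-autorest rather than from this hunk, and the resource names are placeholders):

package synapseexample // hypothetical example package, not part of the SDK

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest"
)

// listTableColumns prints every column of one table, crossing page boundaries automatically.
func listTableColumns(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) error {
	client := synapse.NewSQLPoolTableColumnsClient(subscriptionID)
	client.Authorizer = authorizer

	// Placeholder names; the trailing empty string skips the optional $filter.
	iter, err := client.ListByTableNameComplete(ctx, "example-rg", "example-workspace", "example-sqlpool", "dbo", "example_table", "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}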
+func (client SQLPoolTableColumnsClient) ListByTableName(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, filter string) (result SQLPoolColumnListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTableColumnsClient.ListByTableName") + defer func() { + sc := -1 + if result.spclr.Response.Response != nil { + sc = result.spclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolTableColumnsClient", "ListByTableName", err.Error()) + } + + result.fn = client.listByTableNameNextResults + req, err := client.ListByTableNamePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, tableName, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTableColumnsClient", "ListByTableName", nil, "Failure preparing request") + return + } + + resp, err := client.ListByTableNameSender(req) + if err != nil { + result.spclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTableColumnsClient", "ListByTableName", resp, "Failure sending request") + return + } + + result.spclr, err = client.ListByTableNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTableColumnsClient", "ListByTableName", resp, "Failure responding to request") + } + + return +} + +// ListByTableNamePreparer prepares the ListByTableName request. +func (client SQLPoolTableColumnsClient) ListByTableNamePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tableName": autorest.Encode("path", tableName), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByTableNameSender sends the ListByTableName request. 
The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolTableColumnsClient) ListByTableNameSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByTableNameResponder handles the response to the ListByTableName request. The method always +// closes the http.Response Body. +func (client SQLPoolTableColumnsClient) ListByTableNameResponder(resp *http.Response) (result SQLPoolColumnListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByTableNameNextResults retrieves the next set of results, if any. +func (client SQLPoolTableColumnsClient) listByTableNameNextResults(ctx context.Context, lastResults SQLPoolColumnListResult) (result SQLPoolColumnListResult, err error) { + req, err := lastResults.sQLPoolColumnListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolTableColumnsClient", "listByTableNameNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByTableNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolTableColumnsClient", "listByTableNameNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByTableNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTableColumnsClient", "listByTableNameNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByTableNameComplete enumerates all values, automatically crossing page boundaries as required. +func (client SQLPoolTableColumnsClient) ListByTableNameComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, filter string) (result SQLPoolColumnListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTableColumnsClient.ListByTableName") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByTableName(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, tableName, filter) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltables.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltables.go new file mode 100644 index 000000000000..1f845f79d044 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltables.go @@ -0,0 +1,175 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolTablesClient is the azure Synapse Analytics Management Client +type SQLPoolTablesClient struct { + BaseClient +} + +// NewSQLPoolTablesClient creates an instance of the SQLPoolTablesClient client. +func NewSQLPoolTablesClient(subscriptionID string) SQLPoolTablesClient { + return NewSQLPoolTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolTablesClientWithBaseURI creates an instance of the SQLPoolTablesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolTablesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolTablesClient { + return SQLPoolTablesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ListBySchema gets tables of a given schema in a SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// schemaName - the name of the schema. +// filter - an OData filter expression that filters elements in the collection. +func (client SQLPoolTablesClient) ListBySchema(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, filter string) (result SQLPoolTableListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTablesClient.ListBySchema") + defer func() { + sc := -1 + if result.sptlr.Response.Response != nil { + sc = result.sptlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolTablesClient", "ListBySchema", err.Error()) + } + + result.fn = client.listBySchemaNextResults + req, err := client.ListBySchemaPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, filter) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTablesClient", "ListBySchema", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySchemaSender(req) + if err != nil { + result.sptlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTablesClient", "ListBySchema", resp, "Failure sending request") + return + } + + result.sptlr, err = client.ListBySchemaResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTablesClient", "ListBySchema", 
resp, "Failure responding to request") + } + + return +} + +// ListBySchemaPreparer prepares the ListBySchema request. +func (client SQLPoolTablesClient) ListBySchemaPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySchemaSender sends the ListBySchema request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolTablesClient) ListBySchemaSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListBySchemaResponder handles the response to the ListBySchema request. The method always +// closes the http.Response Body. +func (client SQLPoolTablesClient) ListBySchemaResponder(resp *http.Response) (result SQLPoolTableListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listBySchemaNextResults retrieves the next set of results, if any. +func (client SQLPoolTablesClient) listBySchemaNextResults(ctx context.Context, lastResults SQLPoolTableListResult) (result SQLPoolTableListResult, err error) { + req, err := lastResults.sQLPoolTableListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolTablesClient", "listBySchemaNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListBySchemaSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolTablesClient", "listBySchemaNextResults", resp, "Failure sending next results request") + } + result, err = client.ListBySchemaResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTablesClient", "listBySchemaNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListBySchemaComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client SQLPoolTablesClient) ListBySchemaComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, filter string) (result SQLPoolTableListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTablesClient.ListBySchema") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListBySchema(ctx, resourceGroupName, workspaceName, SQLPoolName, schemaName, filter) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltransparentdataencryptions.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltransparentdataencryptions.go new file mode 100644 index 000000000000..534c054ef2c4 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpooltransparentdataencryptions.go @@ -0,0 +1,227 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolTransparentDataEncryptionsClient is the azure Synapse Analytics Management Client +type SQLPoolTransparentDataEncryptionsClient struct { + BaseClient +} + +// NewSQLPoolTransparentDataEncryptionsClient creates an instance of the SQLPoolTransparentDataEncryptionsClient +// client. +func NewSQLPoolTransparentDataEncryptionsClient(subscriptionID string) SQLPoolTransparentDataEncryptionsClient { + return NewSQLPoolTransparentDataEncryptionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolTransparentDataEncryptionsClientWithBaseURI creates an instance of the +// SQLPoolTransparentDataEncryptionsClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolTransparentDataEncryptionsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolTransparentDataEncryptionsClient { + return SQLPoolTransparentDataEncryptionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a Sql pool's transparent data encryption configuration. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// parameters - the required parameters for creating or updating transparent data encryption. 
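Because the shape of TransparentDataEncryption is defined in models.go rather than in this hunk, the sketch below sticks to the signatures added here: it reads the current configuration with Get and writes it back with CreateOrUpdate. In real code you would change the desired property before resubmitting. Illustrative only; the Authorizer field comes from the embedded autorest.Client, and the resource names are placeholders.

package synapseexample // hypothetical example package, not part of the SDK

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest"
)

// resubmitTDEConfig reads the pool's current transparent data encryption configuration and
// writes it straight back; adjust the desired property (defined in models.go) before the
// CreateOrUpdate call in real code.
func resubmitTDEConfig(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) (synapse.TransparentDataEncryption, error) {
	client := synapse.NewSQLPoolTransparentDataEncryptionsClient(subscriptionID)
	client.Authorizer = authorizer

	current, err := client.Get(ctx, "example-rg", "example-workspace", "example-sqlpool")
	if err != nil {
		return synapse.TransparentDataEncryption{}, err
	}
	return client.CreateOrUpdate(ctx, "example-rg", "example-workspace", "example-sqlpool", current)
}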
+func (client SQLPoolTransparentDataEncryptionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters TransparentDataEncryption) (result TransparentDataEncryption, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTransparentDataEncryptionsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolTransparentDataEncryptionsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTransparentDataEncryptionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTransparentDataEncryptionsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTransparentDataEncryptionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SQLPoolTransparentDataEncryptionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters TransparentDataEncryption) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transparentDataEncryptionName": autorest.Encode("path", "current"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.Location = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/transparentDataEncryption/{transparentDataEncryptionName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolTransparentDataEncryptionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SQLPoolTransparentDataEncryptionsClient) CreateOrUpdateResponder(resp *http.Response) (result TransparentDataEncryption, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a SQL pool's transparent data encryption configuration. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolTransparentDataEncryptionsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result TransparentDataEncryption, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolTransparentDataEncryptionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolTransparentDataEncryptionsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTransparentDataEncryptionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTransparentDataEncryptionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolTransparentDataEncryptionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SQLPoolTransparentDataEncryptionsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "transparentDataEncryptionName": autorest.Encode("path", "current"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/transparentDataEncryption/{transparentDataEncryptionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolTransparentDataEncryptionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SQLPoolTransparentDataEncryptionsClient) GetResponder(resp *http.Response) (result TransparentDataEncryption, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolusages.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolusages.go new file mode 100644 index 000000000000..df7d90632fa0 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolusages.go @@ -0,0 +1,169 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolUsagesClient is the azure Synapse Analytics Management Client +type SQLPoolUsagesClient struct { + BaseClient +} + +// NewSQLPoolUsagesClient creates an instance of the SQLPoolUsagesClient client. 
+func NewSQLPoolUsagesClient(subscriptionID string) SQLPoolUsagesClient { + return NewSQLPoolUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolUsagesClientWithBaseURI creates an instance of the SQLPoolUsagesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolUsagesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolUsagesClient { + return SQLPoolUsagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets SQL pool usages. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolUsagesClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolUsageListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolUsagesClient.List") + defer func() { + sc := -1 + if result.spulr.Response.Response != nil { + sc = result.spulr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolUsagesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolUsagesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.spulr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolUsagesClient", "List", resp, "Failure sending request") + return + } + + result.spulr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolUsagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SQLPoolUsagesClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolUsagesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SQLPoolUsagesClient) ListResponder(resp *http.Response) (result SQLPoolUsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolUsagesClient) listNextResults(ctx context.Context, lastResults SQLPoolUsageListResult) (result SQLPoolUsageListResult, err error) { + req, err := lastResults.sQLPoolUsageListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolUsagesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolUsagesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolUsagesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
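A sketch combining the WithBaseURI constructor described above with the ListComplete iterator defined just below, for example against a sovereign cloud (illustrative only; azure.USGovernmentCloud.ResourceManagerEndpoint comes from go-autorest, the iterator methods from this package's models, and the resource names are placeholders):

package synapseexample // hypothetical example package, not part of the SDK

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// printPoolUsages enumerates all usage metrics for one SQL pool in Azure US Government,
// letting the iterator cross page boundaries.
func printPoolUsages(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) error {
	client := synapse.NewSQLPoolUsagesClientWithBaseURI(azure.USGovernmentCloud.ResourceManagerEndpoint, subscriptionID)
	client.Authorizer = authorizer

	iter, err := client.ListComplete(ctx, "example-rg", "example-workspace", "example-sqlpool")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}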
+func (client SQLPoolUsagesClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolUsageListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolUsagesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessmentrulebaselines.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessmentrulebaselines.go new file mode 100644 index 000000000000..30f9b2d54d6f --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessmentrulebaselines.go @@ -0,0 +1,238 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolVulnerabilityAssessmentRuleBaselinesClient is the azure Synapse Analytics Management Client +type SQLPoolVulnerabilityAssessmentRuleBaselinesClient struct { + BaseClient +} + +// NewSQLPoolVulnerabilityAssessmentRuleBaselinesClient creates an instance of the +// SQLPoolVulnerabilityAssessmentRuleBaselinesClient client. +func NewSQLPoolVulnerabilityAssessmentRuleBaselinesClient(subscriptionID string) SQLPoolVulnerabilityAssessmentRuleBaselinesClient { + return NewSQLPoolVulnerabilityAssessmentRuleBaselinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolVulnerabilityAssessmentRuleBaselinesClientWithBaseURI creates an instance of the +// SQLPoolVulnerabilityAssessmentRuleBaselinesClient client using a custom endpoint. Use this when interacting with an +// Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolVulnerabilityAssessmentRuleBaselinesClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolVulnerabilityAssessmentRuleBaselinesClient { + return SQLPoolVulnerabilityAssessmentRuleBaselinesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a Sql pool's vulnerability assessment rule baseline. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// ruleID - the vulnerability assessment rule ID. 
+// baselineName - the name of the vulnerability assessment rule baseline (default implies a baseline on a Sql +// pool level rule and master for workspace level rule). +// parameters - the requested rule baseline resource. +func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, ruleID string, baselineName VulnerabilityAssessmentPolicyBaselineName, parameters SQLPoolVulnerabilityAssessmentRuleBaseline) (result SQLPoolVulnerabilityAssessmentRuleBaseline, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentRuleBaselinesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.SQLPoolVulnerabilityAssessmentRuleBaselineProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.SQLPoolVulnerabilityAssessmentRuleBaselineProperties.BaselineResults", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, ruleID, baselineName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, ruleID string, baselineName VulnerabilityAssessmentPolicyBaselineName, parameters SQLPoolVulnerabilityAssessmentRuleBaseline) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "baselineName": autorest.Encode("path", baselineName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleId": autorest.Encode("path", ruleID), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/rules/{ruleId}/baselines/{baselineName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) CreateOrUpdateResponder(resp *http.Response) (result SQLPoolVulnerabilityAssessmentRuleBaseline, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete removes the database's vulnerability assessment rule baseline. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// ruleID - the vulnerability assessment rule ID. +// baselineName - the name of the vulnerability assessment rule baseline (default implies a baseline on a Sql +// pool level rule and master for workspace level rule). 
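A sketch of the Delete operation defined just below (illustrative only). The concrete VulnerabilityAssessmentPolicyBaselineName constants live in models.go, so the raw string conversion here assumes the usual string-backed enum; the rule ID and resource names are placeholders, and the Authorizer field comes from the embedded autorest.Client.

package synapseexample // hypothetical example package, not part of the SDK

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest"
)

// removeRuleBaseline drops the SQL-pool-level baseline for one vulnerability assessment rule;
// per the doc comment above, "default" targets the pool-level rule baseline.
func removeRuleBaseline(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) error {
	client := synapse.NewSQLPoolVulnerabilityAssessmentRuleBaselinesClient(subscriptionID)
	client.Authorizer = authorizer

	baseline := synapse.VulnerabilityAssessmentPolicyBaselineName("default") // assumes a string-backed enum
	_, err := client.Delete(ctx, "example-rg", "example-workspace", "example-sqlpool", "VA1001", baseline)
	return err
}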
+func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, ruleID string, baselineName VulnerabilityAssessmentPolicyBaselineName) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentRuleBaselinesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, ruleID, baselineName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, ruleID string, baselineName VulnerabilityAssessmentPolicyBaselineName) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "baselineName": autorest.Encode("path", baselineName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleId": autorest.Encode("path", ruleID), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/rules/{ruleId}/baselines/{baselineName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentRuleBaselinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessments.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessments.go new file mode 100644 index 000000000000..e3cbdccec27c --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessments.go @@ -0,0 +1,439 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolVulnerabilityAssessmentsClient is the azure Synapse Analytics Management Client +type SQLPoolVulnerabilityAssessmentsClient struct { + BaseClient +} + +// NewSQLPoolVulnerabilityAssessmentsClient creates an instance of the SQLPoolVulnerabilityAssessmentsClient client. +func NewSQLPoolVulnerabilityAssessmentsClient(subscriptionID string) SQLPoolVulnerabilityAssessmentsClient { + return NewSQLPoolVulnerabilityAssessmentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolVulnerabilityAssessmentsClientWithBaseURI creates an instance of the SQLPoolVulnerabilityAssessmentsClient +// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI +// (sovereign clouds, Azure stack). +func NewSQLPoolVulnerabilityAssessmentsClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolVulnerabilityAssessmentsClient { + return SQLPoolVulnerabilityAssessmentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates the Sql pool vulnerability assessment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// parameters - the requested resource. 
+func (client SQLPoolVulnerabilityAssessmentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters SQLPoolVulnerabilityAssessment) (result SQLPoolVulnerabilityAssessment, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SQLPoolVulnerabilityAssessmentsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters SQLPoolVulnerabilityAssessment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolVulnerabilityAssessmentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentsClient) CreateOrUpdateResponder(resp *http.Response) (result SQLPoolVulnerabilityAssessment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete removes the database's vulnerability assessment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolVulnerabilityAssessmentsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client SQLPoolVulnerabilityAssessmentsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolVulnerabilityAssessmentsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the Sql pool's vulnerability assessment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolVulnerabilityAssessmentsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolVulnerabilityAssessment, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SQLPoolVulnerabilityAssessmentsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolVulnerabilityAssessmentsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client SQLPoolVulnerabilityAssessmentsClient) GetResponder(resp *http.Response) (result SQLPoolVulnerabilityAssessment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the vulnerability assessment policies associated with a SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolVulnerabilityAssessmentsClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolVulnerabilityAssessmentListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentsClient.List") + defer func() { + sc := -1 + if result.spvalr.Response.Response != nil { + sc = result.spvalr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.spvalr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "List", resp, "Failure sending request") + return + } + + result.spvalr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SQLPoolVulnerabilityAssessmentsClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolVulnerabilityAssessmentsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentsClient) ListResponder(resp *http.Response) (result SQLPoolVulnerabilityAssessmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolVulnerabilityAssessmentsClient) listNextResults(ctx context.Context, lastResults SQLPoolVulnerabilityAssessmentListResult) (result SQLPoolVulnerabilityAssessmentListResult, err error) { + req, err := lastResults.sQLPoolVulnerabilityAssessmentListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
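List above returns the results a page at a time, with listNextResults following each NextLink, while ListComplete below flattens the same data into an iterator. A sketch of consuming the iterator form; resource names are placeholders, and the Name field access assumes the usual proxy-resource shape:

package synapseexamples

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

// listAssessments walks every result via the iterator returned by ListComplete,
// which advances across NextLink boundaries automatically.
func listAssessments(ctx context.Context, client synapse.SQLPoolVulnerabilityAssessmentsClient, rg, ws, pool string) error {
    for it, err := client.ListComplete(ctx, rg, ws, pool); it.NotDone(); err = it.NextWithContext(ctx) {
        if err != nil {
            return err
        }
        item := it.Value() // synapse.SQLPoolVulnerabilityAssessment
        if item.Name != nil {
            fmt.Println(*item.Name)
        }
    }
    return nil
}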
+func (client SQLPoolVulnerabilityAssessmentsClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result SQLPoolVulnerabilityAssessmentListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessmentscans.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessmentscans.go new file mode 100644 index 000000000000..0200438ed8fa --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/sqlpoolvulnerabilityassessmentscans.go @@ -0,0 +1,353 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SQLPoolVulnerabilityAssessmentScansClient is the azure Synapse Analytics Management Client +type SQLPoolVulnerabilityAssessmentScansClient struct { + BaseClient +} + +// NewSQLPoolVulnerabilityAssessmentScansClient creates an instance of the SQLPoolVulnerabilityAssessmentScansClient +// client. +func NewSQLPoolVulnerabilityAssessmentScansClient(subscriptionID string) SQLPoolVulnerabilityAssessmentScansClient { + return NewSQLPoolVulnerabilityAssessmentScansClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSQLPoolVulnerabilityAssessmentScansClientWithBaseURI creates an instance of the +// SQLPoolVulnerabilityAssessmentScansClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSQLPoolVulnerabilityAssessmentScansClientWithBaseURI(baseURI string, subscriptionID string) SQLPoolVulnerabilityAssessmentScansClient { + return SQLPoolVulnerabilityAssessmentScansClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Export convert an existing scan result to a human readable format. If already exists nothing happens +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// scanID - the vulnerability assessment scan Id of the scan to retrieve. 
+func (client SQLPoolVulnerabilityAssessmentScansClient) Export(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, scanID string) (result SQLPoolVulnerabilityAssessmentScansExport, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentScansClient.Export") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentScansClient", "Export", err.Error()) + } + + req, err := client.ExportPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, scanID) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "Export", nil, "Failure preparing request") + return + } + + resp, err := client.ExportSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "Export", resp, "Failure sending request") + return + } + + result, err = client.ExportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "Export", resp, "Failure responding to request") + } + + return +} + +// ExportPreparer prepares the Export request. +func (client SQLPoolVulnerabilityAssessmentScansClient) ExportPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, scanID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "scanId": autorest.Encode("path", scanID), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans/{scanId}/export", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportSender sends the Export request. The method will close the +// http.Response Body if it receives an error. 
+func (client SQLPoolVulnerabilityAssessmentScansClient) ExportSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ExportResponder handles the response to the Export request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentScansClient) ExportResponder(resp *http.Response) (result SQLPoolVulnerabilityAssessmentScansExport, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// InitiateScan executes a Vulnerability Assessment database scan. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +// scanID - the vulnerability assessment scan Id of the scan to retrieve. +func (client SQLPoolVulnerabilityAssessmentScansClient) InitiateScan(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, scanID string) (result SQLPoolVulnerabilityAssessmentScansInitiateScanFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentScansClient.InitiateScan") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentScansClient", "InitiateScan", err.Error()) + } + + req, err := client.InitiateScanPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName, scanID) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "InitiateScan", nil, "Failure preparing request") + return + } + + result, err = client.InitiateScanSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "InitiateScan", result.Response(), "Failure sending request") + return + } + + return +} + +// InitiateScanPreparer prepares the InitiateScan request. 
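InitiateScan above is modelled as a long-running operation: its Sender wraps the service's initial response in a future instead of returning a final result, so callers normally block on the future before reading the outcome. A sketch under the assumption that the future exposes the usual WaitForCompletionRef and generated Result helpers (the latter lives in models.go, which is not shown in this diff):

package synapseexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

// runScan kicks off a vulnerability assessment scan and waits for the service to
// report a terminal state on the operation tracked by the returned future.
func runScan(ctx context.Context, client synapse.SQLPoolVulnerabilityAssessmentScansClient, rg, ws, pool, scanID string) error {
    future, err := client.InitiateScan(ctx, rg, ws, pool, scanID)
    if err != nil {
        return err
    }
    // WaitForCompletionRef polls until the operation completes or ctx is cancelled.
    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
        return err
    }
    _, err = future.Result(client) // final autorest.Response for the initiate-scan call
    return err
}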
+func (client SQLPoolVulnerabilityAssessmentScansClient) InitiateScanPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, scanID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "scanId": autorest.Encode("path", scanID), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans/{scanId}/initiateScan", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// InitiateScanSender sends the InitiateScan request. The method will close the +// http.Response Body if it receives an error. +func (client SQLPoolVulnerabilityAssessmentScansClient) InitiateScanSender(req *http.Request) (future SQLPoolVulnerabilityAssessmentScansInitiateScanFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// InitiateScanResponder handles the response to the InitiateScan request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentScansClient) InitiateScanResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// List lists the vulnerability assessment scans of a SQL pool. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// SQLPoolName - SQL pool name +func (client SQLPoolVulnerabilityAssessmentScansClient) List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result VulnerabilityAssessmentScanRecordListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentScansClient.List") + defer func() { + sc := -1 + if result.vasrlr.Response.Response != nil { + sc = result.vasrlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.SQLPoolVulnerabilityAssessmentScansClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName, SQLPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vasrlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "List", resp, "Failure sending request") + return + } + + result.vasrlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SQLPoolVulnerabilityAssessmentScansClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "sqlPoolName": autorest.Encode("path", SQLPoolName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vulnerabilityAssessmentName": autorest.Encode("path", "default"), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
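The scans client pages its List results the same way; a sketch of walking the pages directly rather than through the iterator, with placeholder resource names:

package synapseexamples

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
)

// printScanRecords consumes List page by page; each NextWithContext call is served
// by listNextResults below, which follows the NextLink from the previous page.
func printScanRecords(ctx context.Context, client synapse.SQLPoolVulnerabilityAssessmentScansClient, rg, ws, pool string) error {
    page, err := client.List(ctx, rg, ws, pool)
    if err != nil {
        return err
    }
    for page.NotDone() {
        for _, rec := range page.Values() { // []synapse.VulnerabilityAssessmentScanRecord
            if rec.Name != nil {
                fmt.Println(*rec.Name)
            }
        }
        if err := page.NextWithContext(ctx); err != nil {
            return err
        }
    }
    return nil
}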
+func (client SQLPoolVulnerabilityAssessmentScansClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SQLPoolVulnerabilityAssessmentScansClient) ListResponder(resp *http.Response) (result VulnerabilityAssessmentScanRecordListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SQLPoolVulnerabilityAssessmentScansClient) listNextResults(ctx context.Context, lastResults VulnerabilityAssessmentScanRecordListResult) (result VulnerabilityAssessmentScanRecordListResult, err error) { + req, err := lastResults.vulnerabilityAssessmentScanRecordListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.SQLPoolVulnerabilityAssessmentScansClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SQLPoolVulnerabilityAssessmentScansClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result VulnerabilityAssessmentScanRecordListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SQLPoolVulnerabilityAssessmentScansClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, workspaceName, SQLPoolName) + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/synapseapi/interfaces.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/synapseapi/interfaces.go new file mode 100644 index 000000000000..dbaeecc06738 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/synapseapi/interfaces.go @@ -0,0 +1,263 @@ +package synapseapi + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse" + "github.com/Azure/go-autorest/autorest" +) + +// BigDataPoolsClientAPI contains the set of methods on the BigDataPoolsClient type. +type BigDataPoolsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string, bigDataPoolInfo synapse.BigDataPoolResourceInfo, force *bool) (result synapse.BigDataPoolsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string) (result synapse.BigDataPoolsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string) (result synapse.BigDataPoolResourceInfo, err error) + ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.BigDataPoolResourceInfoListResultPage, err error) + ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.BigDataPoolResourceInfoListResultIterator, err error) + Update(ctx context.Context, resourceGroupName string, workspaceName string, bigDataPoolName string, bigDataPoolPatchInfo synapse.BigDataPoolPatchInfo) (result synapse.BigDataPoolResourceInfo, err error) +} + +var _ BigDataPoolsClientAPI = (*synapse.BigDataPoolsClient)(nil) + +// OperationsClientAPI contains the set of methods on the OperationsClient type. +type OperationsClientAPI interface { + CheckNameAvailability(ctx context.Context, request synapse.CheckNameAvailabilityRequest) (result synapse.CheckNameAvailabilityResponse, err error) + GetAzureAsyncHeaderResult(ctx context.Context, resourceGroupName string, workspaceName string, operationID string) (result synapse.SetObject, err error) + GetLocationHeaderResult(ctx context.Context, resourceGroupName string, workspaceName string, operationID string) (result autorest.Response, err error) + List(ctx context.Context) (result synapse.ListAvailableRpOperation, err error) +} + +var _ OperationsClientAPI = (*synapse.OperationsClient)(nil) + +// IPFirewallRulesClientAPI contains the set of methods on the IPFirewallRulesClient type. +type IPFirewallRulesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string, IPFirewallRuleInfo synapse.IPFirewallRuleInfo) (result synapse.IPFirewallRulesCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string, ruleName string) (result synapse.IPFirewallRulesDeleteFuture, err error) + ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.IPFirewallRuleInfoListResultPage, err error) + ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.IPFirewallRuleInfoListResultIterator, err error) + ReplaceAll(ctx context.Context, resourceGroupName string, workspaceName string, request synapse.ReplaceAllIPFirewallRulesRequest) (result synapse.IPFirewallRulesReplaceAllFuture, err error) +} + +var _ IPFirewallRulesClientAPI = (*synapse.IPFirewallRulesClient)(nil) + +// SQLPoolsClientAPI contains the set of methods on the SQLPoolsClient type. 
+type SQLPoolsClientAPI interface { + Create(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, SQLPoolInfo synapse.SQLPool) (result synapse.SQLPoolsCreateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPool, err error) + ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.SQLPoolInfoListResultPage, err error) + ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.SQLPoolInfoListResultIterator, err error) + Pause(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolsPauseFuture, err error) + Rename(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters synapse.ResourceMoveDefinition) (result autorest.Response, err error) + Resume(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolsResumeFuture, err error) + Update(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, SQLPoolInfo synapse.SQLPoolPatchInfo) (result synapse.SQLPool, err error) +} + +var _ SQLPoolsClientAPI = (*synapse.SQLPoolsClient)(nil) + +// SQLPoolMetadataSyncConfigsClientAPI contains the set of methods on the SQLPoolMetadataSyncConfigsClient type. +type SQLPoolMetadataSyncConfigsClientAPI interface { + Create(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, metadataSyncConfiguration synapse.MetadataSyncConfig) (result synapse.MetadataSyncConfig, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.MetadataSyncConfig, err error) +} + +var _ SQLPoolMetadataSyncConfigsClientAPI = (*synapse.SQLPoolMetadataSyncConfigsClient)(nil) + +// SQLPoolOperationResultsClientAPI contains the set of methods on the SQLPoolOperationResultsClient type. +type SQLPoolOperationResultsClientAPI interface { + GetLocationHeaderResult(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, operationID string) (result synapse.SetObject, err error) +} + +var _ SQLPoolOperationResultsClientAPI = (*synapse.SQLPoolOperationResultsClient)(nil) + +// SQLPoolGeoBackupPoliciesClientAPI contains the set of methods on the SQLPoolGeoBackupPoliciesClient type. +type SQLPoolGeoBackupPoliciesClientAPI interface { + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.GeoBackupPolicy, err error) +} + +var _ SQLPoolGeoBackupPoliciesClientAPI = (*synapse.SQLPoolGeoBackupPoliciesClient)(nil) + +// SQLPoolDataWarehouseUserActivitiesClientAPI contains the set of methods on the SQLPoolDataWarehouseUserActivitiesClient type. +type SQLPoolDataWarehouseUserActivitiesClientAPI interface { + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.DataWarehouseUserActivities, err error) +} + +var _ SQLPoolDataWarehouseUserActivitiesClientAPI = (*synapse.SQLPoolDataWarehouseUserActivitiesClient)(nil) + +// SQLPoolRestorePointsClientAPI contains the set of methods on the SQLPoolRestorePointsClient type. 
+type SQLPoolRestorePointsClientAPI interface { + Create(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters synapse.CreateSQLPoolRestorePointDefinition) (result synapse.SQLPoolRestorePointsCreateFuture, err error) + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.RestorePointListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.RestorePointListResultIterator, err error) +} + +var _ SQLPoolRestorePointsClientAPI = (*synapse.SQLPoolRestorePointsClient)(nil) + +// SQLPoolReplicationLinksClientAPI contains the set of methods on the SQLPoolReplicationLinksClient type. +type SQLPoolReplicationLinksClientAPI interface { + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.ReplicationLinkListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.ReplicationLinkListResultIterator, err error) +} + +var _ SQLPoolReplicationLinksClientAPI = (*synapse.SQLPoolReplicationLinksClient)(nil) + +// SQLPoolTransparentDataEncryptionsClientAPI contains the set of methods on the SQLPoolTransparentDataEncryptionsClient type. +type SQLPoolTransparentDataEncryptionsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters synapse.TransparentDataEncryption) (result synapse.TransparentDataEncryption, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.TransparentDataEncryption, err error) +} + +var _ SQLPoolTransparentDataEncryptionsClientAPI = (*synapse.SQLPoolTransparentDataEncryptionsClient)(nil) + +// SQLPoolBlobAuditingPoliciesClientAPI contains the set of methods on the SQLPoolBlobAuditingPoliciesClient type. +type SQLPoolBlobAuditingPoliciesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters synapse.SQLPoolBlobAuditingPolicy) (result synapse.SQLPoolBlobAuditingPolicy, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolBlobAuditingPolicy, err error) +} + +var _ SQLPoolBlobAuditingPoliciesClientAPI = (*synapse.SQLPoolBlobAuditingPoliciesClient)(nil) + +// SQLPoolOperationsClientAPI contains the set of methods on the SQLPoolOperationsClient type. +type SQLPoolOperationsClientAPI interface { + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolBlobAuditingPolicySQLPoolOperationListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolBlobAuditingPolicySQLPoolOperationListResultIterator, err error) +} + +var _ SQLPoolOperationsClientAPI = (*synapse.SQLPoolOperationsClient)(nil) + +// SQLPoolUsagesClientAPI contains the set of methods on the SQLPoolUsagesClient type. 
+type SQLPoolUsagesClientAPI interface { + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolUsageListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolUsageListResultIterator, err error) +} + +var _ SQLPoolUsagesClientAPI = (*synapse.SQLPoolUsagesClient)(nil) + +// SQLPoolSensitivityLabelsClientAPI contains the set of methods on the SQLPoolSensitivityLabelsClient type. +type SQLPoolSensitivityLabelsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string, parameters synapse.SensitivityLabel) (result synapse.SensitivityLabel, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) + DisableRecommendation(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) + EnableRecommendation(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) + ListCurrent(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result synapse.SensitivityLabelListResultPage, err error) + ListCurrentComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result synapse.SensitivityLabelListResultIterator, err error) + ListRecommended(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, includeDisabledRecommendations *bool, skipToken string, filter string) (result synapse.SensitivityLabelListResultPage, err error) + ListRecommendedComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, includeDisabledRecommendations *bool, skipToken string, filter string) (result synapse.SensitivityLabelListResultIterator, err error) +} + +var _ SQLPoolSensitivityLabelsClientAPI = (*synapse.SQLPoolSensitivityLabelsClient)(nil) + +// SQLPoolSchemasClientAPI contains the set of methods on the SQLPoolSchemasClient type. +type SQLPoolSchemasClientAPI interface { + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result synapse.SQLPoolSchemaListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, filter string) (result synapse.SQLPoolSchemaListResultIterator, err error) +} + +var _ SQLPoolSchemasClientAPI = (*synapse.SQLPoolSchemasClient)(nil) + +// SQLPoolTablesClientAPI contains the set of methods on the SQLPoolTablesClient type. 
+type SQLPoolTablesClientAPI interface { + ListBySchema(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, filter string) (result synapse.SQLPoolTableListResultPage, err error) + ListBySchemaComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, filter string) (result synapse.SQLPoolTableListResultIterator, err error) +} + +var _ SQLPoolTablesClientAPI = (*synapse.SQLPoolTablesClient)(nil) + +// SQLPoolTableColumnsClientAPI contains the set of methods on the SQLPoolTableColumnsClient type. +type SQLPoolTableColumnsClientAPI interface { + ListByTableName(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, filter string) (result synapse.SQLPoolColumnListResultPage, err error) + ListByTableNameComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, schemaName string, tableName string, filter string) (result synapse.SQLPoolColumnListResultIterator, err error) +} + +var _ SQLPoolTableColumnsClientAPI = (*synapse.SQLPoolTableColumnsClient)(nil) + +// SQLPoolConnectionPoliciesClientAPI contains the set of methods on the SQLPoolConnectionPoliciesClient type. +type SQLPoolConnectionPoliciesClientAPI interface { + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolConnectionPolicy, err error) +} + +var _ SQLPoolConnectionPoliciesClientAPI = (*synapse.SQLPoolConnectionPoliciesClient)(nil) + +// SQLPoolVulnerabilityAssessmentsClientAPI contains the set of methods on the SQLPoolVulnerabilityAssessmentsClient type. +type SQLPoolVulnerabilityAssessmentsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters synapse.SQLPoolVulnerabilityAssessment) (result synapse.SQLPoolVulnerabilityAssessment, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result autorest.Response, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolVulnerabilityAssessment, err error) + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolVulnerabilityAssessmentListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolVulnerabilityAssessmentListResultIterator, err error) +} + +var _ SQLPoolVulnerabilityAssessmentsClientAPI = (*synapse.SQLPoolVulnerabilityAssessmentsClient)(nil) + +// SQLPoolVulnerabilityAssessmentScansClientAPI contains the set of methods on the SQLPoolVulnerabilityAssessmentScansClient type. 
+type SQLPoolVulnerabilityAssessmentScansClientAPI interface { + Export(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, scanID string) (result synapse.SQLPoolVulnerabilityAssessmentScansExport, err error) + InitiateScan(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, scanID string) (result synapse.SQLPoolVulnerabilityAssessmentScansInitiateScanFuture, err error) + List(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.VulnerabilityAssessmentScanRecordListResultPage, err error) + ListComplete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.VulnerabilityAssessmentScanRecordListResultIterator, err error) +} + +var _ SQLPoolVulnerabilityAssessmentScansClientAPI = (*synapse.SQLPoolVulnerabilityAssessmentScansClient)(nil) + +// SQLPoolSecurityAlertPoliciesClientAPI contains the set of methods on the SQLPoolSecurityAlertPoliciesClient type. +type SQLPoolSecurityAlertPoliciesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, parameters synapse.SQLPoolSecurityAlertPolicy) (result synapse.SQLPoolSecurityAlertPolicy, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string) (result synapse.SQLPoolSecurityAlertPolicy, err error) +} + +var _ SQLPoolSecurityAlertPoliciesClientAPI = (*synapse.SQLPoolSecurityAlertPoliciesClient)(nil) + +// SQLPoolVulnerabilityAssessmentRuleBaselinesClientAPI contains the set of methods on the SQLPoolVulnerabilityAssessmentRuleBaselinesClient type. +type SQLPoolVulnerabilityAssessmentRuleBaselinesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, ruleID string, baselineName synapse.VulnerabilityAssessmentPolicyBaselineName, parameters synapse.SQLPoolVulnerabilityAssessmentRuleBaseline) (result synapse.SQLPoolVulnerabilityAssessmentRuleBaseline, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string, SQLPoolName string, ruleID string, baselineName synapse.VulnerabilityAssessmentPolicyBaselineName) (result autorest.Response, err error) +} + +var _ SQLPoolVulnerabilityAssessmentRuleBaselinesClientAPI = (*synapse.SQLPoolVulnerabilityAssessmentRuleBaselinesClient)(nil) + +// WorkspacesClientAPI contains the set of methods on the WorkspacesClient type. 
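These *ClientAPI interfaces let calling code depend on a narrow contract rather than on the concrete clients, and the var _ assertions keep the interfaces and clients in sync at compile time. A sketch of a hand-written test double for one of the smaller interfaces; the fake type and its canned behaviour are illustrative only:

package synapseexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
    "github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse/synapseapi"
)

// fakeAlertPolicies satisfies synapseapi.SQLPoolSecurityAlertPoliciesClientAPI so code
// under test can be exercised without calling Azure.
type fakeAlertPolicies struct {
    stored synapse.SQLPoolSecurityAlertPolicy
}

func (f *fakeAlertPolicies) CreateOrUpdate(ctx context.Context, resourceGroupName, workspaceName, SQLPoolName string, parameters synapse.SQLPoolSecurityAlertPolicy) (synapse.SQLPoolSecurityAlertPolicy, error) {
    f.stored = parameters
    return parameters, nil
}

func (f *fakeAlertPolicies) Get(ctx context.Context, resourceGroupName, workspaceName, SQLPoolName string) (synapse.SQLPoolSecurityAlertPolicy, error) {
    return f.stored, nil
}

// Mirrors the compile-time assertions used in interfaces.go itself.
var _ synapseapi.SQLPoolSecurityAlertPoliciesClientAPI = (*fakeAlertPolicies)(nil)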
+type WorkspacesClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, workspaceInfo synapse.Workspace) (result synapse.WorkspacesCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.WorkspacesDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.Workspace, err error) + List(ctx context.Context) (result synapse.WorkspaceInfoListResultPage, err error) + ListComplete(ctx context.Context) (result synapse.WorkspaceInfoListResultIterator, err error) + ListByResourceGroup(ctx context.Context, resourceGroupName string) (result synapse.WorkspaceInfoListResultPage, err error) + ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result synapse.WorkspaceInfoListResultIterator, err error) + Update(ctx context.Context, resourceGroupName string, workspaceName string, workspacePatchInfo synapse.WorkspacePatchInfo) (result synapse.WorkspacesUpdateFuture, err error) +} + +var _ WorkspacesClientAPI = (*synapse.WorkspacesClient)(nil) + +// WorkspaceAadAdminsClientAPI contains the set of methods on the WorkspaceAadAdminsClient type. +type WorkspaceAadAdminsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, aadAdminInfo synapse.WorkspaceAadAdminInfo) (result synapse.WorkspaceAadAdminsCreateOrUpdateFuture, err error) + Delete(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.WorkspaceAadAdminsDeleteFuture, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.WorkspaceAadAdminInfo, err error) +} + +var _ WorkspaceAadAdminsClientAPI = (*synapse.WorkspaceAadAdminsClient)(nil) + +// WorkspaceManagedIdentitySQLControlSettingsClientAPI contains the set of methods on the WorkspaceManagedIdentitySQLControlSettingsClient type. +type WorkspaceManagedIdentitySQLControlSettingsClientAPI interface { + CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, managedIdentitySQLControlSettings synapse.ManagedIdentitySQLControlSettingsModel) (result synapse.ManagedIdentitySQLControlSettingsModel, err error) + Get(ctx context.Context, resourceGroupName string, workspaceName string) (result synapse.ManagedIdentitySQLControlSettingsModel, err error) +} + +var _ WorkspaceManagedIdentitySQLControlSettingsClientAPI = (*synapse.WorkspaceManagedIdentitySQLControlSettingsClient)(nil) diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/version.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/version.go new file mode 100644 index 000000000000..239b58388afd --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/version.go @@ -0,0 +1,30 @@ +package synapse + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " synapse/2019-06-01-preview" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspaceaadadmins.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspaceaadadmins.go new file mode 100644 index 000000000000..1e1bb1f819f0 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspaceaadadmins.go @@ -0,0 +1,304 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// WorkspaceAadAdminsClient is the azure Synapse Analytics Management Client +type WorkspaceAadAdminsClient struct { + BaseClient +} + +// NewWorkspaceAadAdminsClient creates an instance of the WorkspaceAadAdminsClient client. +func NewWorkspaceAadAdminsClient(subscriptionID string) WorkspaceAadAdminsClient { + return NewWorkspaceAadAdminsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkspaceAadAdminsClientWithBaseURI creates an instance of the WorkspaceAadAdminsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewWorkspaceAadAdminsClientWithBaseURI(baseURI string, subscriptionID string) WorkspaceAadAdminsClient { + return WorkspaceAadAdminsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a workspace active directory admin +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// aadAdminInfo - workspace active directory administrator properties +func (client WorkspaceAadAdminsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, aadAdminInfo WorkspaceAadAdminInfo) (result WorkspaceAadAdminsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceAadAdminsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspaceAadAdminsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, aadAdminInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client WorkspaceAadAdminsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, aadAdminInfo WorkspaceAadAdminInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/administrators/activeDirectory", pathParameters), + autorest.WithJSON(aadAdminInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceAadAdminsClient) CreateOrUpdateSender(req *http.Request) (future WorkspaceAadAdminsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client WorkspaceAadAdminsClient) CreateOrUpdateResponder(resp *http.Response) (result WorkspaceAadAdminInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a workspace active directory admin +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +func (client WorkspaceAadAdminsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string) (result WorkspaceAadAdminsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceAadAdminsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspaceAadAdminsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client WorkspaceAadAdminsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/administrators/activeDirectory", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client WorkspaceAadAdminsClient) DeleteSender(req *http.Request) (future WorkspaceAadAdminsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client WorkspaceAadAdminsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a workspace active directory admin +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +func (client WorkspaceAadAdminsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string) (result WorkspaceAadAdminInfo, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceAadAdminsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspaceAadAdminsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceAadAdminsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client WorkspaceAadAdminsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/administrators/activeDirectory", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceAadAdminsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkspaceAadAdminsClient) GetResponder(resp *http.Response) (result WorkspaceAadAdminInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspacemanagedidentitysqlcontrolsettings.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspacemanagedidentitysqlcontrolsettings.go new file mode 100644 index 000000000000..ec129a4553c0 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspacemanagedidentitysqlcontrolsettings.go @@ -0,0 +1,220 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// WorkspaceManagedIdentitySQLControlSettingsClient is the azure Synapse Analytics Management Client +type WorkspaceManagedIdentitySQLControlSettingsClient struct { + BaseClient +} + +// NewWorkspaceManagedIdentitySQLControlSettingsClient creates an instance of the +// WorkspaceManagedIdentitySQLControlSettingsClient client. 
+func NewWorkspaceManagedIdentitySQLControlSettingsClient(subscriptionID string) WorkspaceManagedIdentitySQLControlSettingsClient { + return NewWorkspaceManagedIdentitySQLControlSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkspaceManagedIdentitySQLControlSettingsClientWithBaseURI creates an instance of the +// WorkspaceManagedIdentitySQLControlSettingsClient client using a custom endpoint. Use this when interacting with an +// Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWorkspaceManagedIdentitySQLControlSettingsClientWithBaseURI(baseURI string, subscriptionID string) WorkspaceManagedIdentitySQLControlSettingsClient { + return WorkspaceManagedIdentitySQLControlSettingsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate sends the create or update request. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +// managedIdentitySQLControlSettings - managed Identity Sql Control Settings +func (client WorkspaceManagedIdentitySQLControlSettingsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, managedIdentitySQLControlSettings ManagedIdentitySQLControlSettingsModel) (result ManagedIdentitySQLControlSettingsModel, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceManagedIdentitySQLControlSettingsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, managedIdentitySQLControlSettings) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client WorkspaceManagedIdentitySQLControlSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, managedIdentitySQLControlSettings ManagedIdentitySQLControlSettingsModel) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default", pathParameters), + autorest.WithJSON(managedIdentitySQLControlSettings), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceManagedIdentitySQLControlSettingsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client WorkspaceManagedIdentitySQLControlSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedIdentitySQLControlSettingsModel, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get sends the get request. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +func (client WorkspaceManagedIdentitySQLControlSettingsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string) (result ManagedIdentitySQLControlSettingsModel, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceManagedIdentitySQLControlSettingsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspaceManagedIdentitySQLControlSettingsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WorkspaceManagedIdentitySQLControlSettingsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceManagedIdentitySQLControlSettingsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client WorkspaceManagedIdentitySQLControlSettingsClient) GetResponder(resp *http.Response) (result ManagedIdentitySQLControlSettingsModel, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspaces.go b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspaces.go new file mode 100644 index 000000000000..77f1a8b62566 --- /dev/null +++ b/services/preview/synapse/mgmt/2019-06-01-preview/synapse/workspaces.go @@ -0,0 +1,630 @@ +package synapse + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// WorkspacesClient is the azure Synapse Analytics Management Client +type WorkspacesClient struct { + BaseClient +} + +// NewWorkspacesClient creates an instance of the WorkspacesClient client. +func NewWorkspacesClient(subscriptionID string) WorkspacesClient { + return NewWorkspacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkspacesClientWithBaseURI creates an instance of the WorkspacesClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWorkspacesClientWithBaseURI(baseURI string, subscriptionID string) WorkspacesClient { + return WorkspacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a workspace +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// workspaceInfo - workspace create or update request properties +func (client WorkspacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, workspaceInfo Workspace) (result WorkspacesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspacesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, workspaceInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client WorkspacesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, workspaceInfo Workspace) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}", pathParameters), + autorest.WithJSON(workspaceInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) CreateOrUpdateSender(req *http.Request) (future WorkspacesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client WorkspacesClient) CreateOrUpdateResponder(resp *http.Response) (result Workspace, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a workspace +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +func (client WorkspacesClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string) (result WorkspacesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspacesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client WorkspacesClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) DeleteSender(req *http.Request) (future WorkspacesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client WorkspacesClient) DeleteResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets a workspace +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// workspaceName - the name of the workspace +func (client WorkspacesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string) (result Workspace, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspacesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WorkspacesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. 
The method always +// closes the http.Response Body. +func (client WorkspacesClient) GetResponder(resp *http.Response) (result Workspace, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List returns a list of workspaces in a subscription +func (client WorkspacesClient) List(ctx context.Context) (result WorkspaceInfoListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.List") + defer func() { + sc := -1 + if result.wilr.Response.Response != nil { + sc = result.wilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspacesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.wilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "List", resp, "Failure sending request") + return + } + + result.wilr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WorkspacesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Synapse/workspaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client WorkspacesClient) ListResponder(resp *http.Response) (result WorkspaceInfoListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client WorkspacesClient) listNextResults(ctx context.Context, lastResults WorkspaceInfoListResult) (result WorkspaceInfoListResult, err error) { + req, err := lastResults.workspaceInfoListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client WorkspacesClient) ListComplete(ctx context.Context) (result WorkspaceInfoListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup returns a list of workspaces in a resource group +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +func (client WorkspacesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result WorkspaceInfoListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.wilr.Response.Response != nil { + sc = result.wilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspacesClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.wilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.wilr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client WorkspacesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client WorkspacesClient) ListByResourceGroupResponder(resp *http.Response) (result WorkspaceInfoListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client WorkspacesClient) listByResourceGroupNextResults(ctx context.Context, lastResults WorkspaceInfoListResult) (result WorkspaceInfoListResult, err error) { + req, err := lastResults.workspaceInfoListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client WorkspacesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result WorkspaceInfoListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// Update updates a workspace +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. 
+// workspaceName - the name of the workspace +// workspacePatchInfo - workspace patch request properties +func (client WorkspacesClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, workspacePatchInfo WorkspacePatchInfo) (result WorkspacesUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("synapse.WorkspacesClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, workspaceName, workspacePatchInfo) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "synapse.WorkspacesClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client WorkspacesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, workspacePatchInfo WorkspacePatchInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceName": autorest.Encode("path", workspaceName), + } + + const APIVersion = "2019-06-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}", pathParameters), + autorest.WithJSON(workspacePatchInfo), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) UpdateSender(req *http.Request) (future WorkspacesUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client WorkspacesClient) UpdateResponder(resp *http.Response) (result Workspace, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/swagger_to_sdk_config.json b/swagger_to_sdk_config.json
index d805689c3966..22befdc4641d 100644
--- a/swagger_to_sdk_config.json
+++ b/swagger_to_sdk_config.json
@@ -8,7 +8,7 @@
       "gofmt -w ./services/"
     ],
     "autorest_options": {
-      "use": "@microsoft.azure/autorest.go@~2.1.141",
+      "use": "@microsoft.azure/autorest.go@~2.1.142",
       "go": "",
       "verbose": "",
       "sdkrel:go-sdk-folder": ".",
diff --git a/version/version.go b/version/version.go
index c346b3ccbe6b..d4be74814a45 100644
--- a/version/version.go
+++ b/version/version.go
@@ -18,4 +18,4 @@ package version
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 
 // Number contains the semantic version of this SDK.
-const Number = "v39.2.0"
+const Number = "v39.3.0"
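
For orientation, below is a minimal sketch of how the new synapse 2019-06-01-preview clients introduced in this changeset might be consumed. The subscription ID, resource group, and workspace name are placeholders, the authorizer setup assumes the go-autorest azure/auth helper package, and only NewWorkspacesClient and Get from this changeset are exercised; it is an illustration, not part of the generated code.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/synapse/mgmt/2019-06-01-preview/synapse"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder identifiers; substitute real values.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	resourceGroup := "example-rg"
	workspaceName := "example-workspace"

	// Build an authorizer from environment variables (service principal, MSI, etc.).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// WorkspacesClient is one of the clients added by this changeset.
	client := synapse.NewWorkspacesClient(subscriptionID)
	client.Authorizer = authorizer

	// Fetch a single Synapse workspace.
	ws, err := client.Get(context.Background(), resourceGroup, workspaceName)
	if err != nil {
		log.Fatal(err)
	}
	if ws.Name != nil {
		fmt.Println("found workspace:", *ws.Name)
	}
}

The long-running operations added here (for example WorkspacesClient.CreateOrUpdate, Delete, and Update) return future types; callers would typically wait for the operation to finish (for example via the future's WaitForCompletionRef) before reading the final result.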